diff --git a/.gitignore b/.gitignore
index f460c9e..b0123c2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,15 +1,16 @@
 SOURCES/HAM-logo.png
 SOURCES/backports-3.6.8.gem
-SOURCES/eventmachine-1.2.0.1.gem
+SOURCES/ethon-0.10.1.gem
+SOURCES/ffi-1.9.17.gem
 SOURCES/mock-1.0.1.tar.gz
-SOURCES/multi_json-1.12.0.gem
+SOURCES/multi_json-1.12.1.gem
 SOURCES/open4-1.3.4.gem
 SOURCES/orderedhash-0.0.6.gem
-SOURCES/pcs-0.9.152.tar.gz
+SOURCES/pcs-0.9.158.tar.gz
 SOURCES/rack-1.6.4.gem
 SOURCES/rack-protection-1.5.3.gem
 SOURCES/rack-test-0.6.3.gem
 SOURCES/rpam-ruby19-1.2.1.gem
-SOURCES/sinatra-1.4.7.gem
+SOURCES/sinatra-1.4.8.gem
 SOURCES/sinatra-contrib-1.4.7.gem
-SOURCES/tilt-2.0.3.gem
+SOURCES/tilt-2.0.6.gem
diff --git a/.pcs.metadata b/.pcs.metadata
index e39b7ea..f56a5ce 100644
--- a/.pcs.metadata
+++ b/.pcs.metadata
@@ -1,16 +1,16 @@
+80dc7788a3468fb7dd362a4b8bedd9efb373de89 SOURCES/HAM-logo.png
 5c9dd0d5552d242ee6bb338a9097e85f0a0a45d5 SOURCES/backports-3.6.8.gem
-60b6f1d8391cd374c6a2ef3977cb1397ed89055a SOURCES/eventmachine-1.2.0.1.gem
+0e362edc1035fa4adc3e52fcc27d15e796e6e9cf SOURCES/ethon-0.10.1.gem
+499119750963bd1266b4184e169eb9da76462e2a SOURCES/ffi-1.9.17.gem
 baa3446eb63557a24c4522dc5a61cfad082fa395 SOURCES/mock-1.0.1.tar.gz
-46156f5a4ff17a23c15d0d2f0fc84cb5627ac70d SOURCES/multi_json-1.12.0.gem
+b418d7b93e99a6f7d1acb70453470aace4599d1a SOURCES/multi_json-1.12.1.gem
 41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4.gem
 709cc95025009e5d221e37cb0777e98582146809 SOURCES/orderedhash-0.0.6.gem
-2808df782cd1d269e1d94c36a52573023128c0a0 SOURCES/pcs-0.9.152.tar.gz
+20c9d1566e18693c291deb3a23c87cc86d23be3d SOURCES/pcs-0.9.158.tar.gz
 0a1eea6d7bb903d8c075688534480e87d4151470 SOURCES/rack-1.6.4.gem
 1c28529c1d7376c61faed80f3d3297905a14c2b3 SOURCES/rack-protection-1.5.3.gem
 6fd5a7f881a65ef93b66e21556ef67fbe08a2fcc SOURCES/rack-test-0.6.3.gem
 a90e5a60d99445404a3c29a66d953a5e9918976d SOURCES/rpam-ruby19-1.2.1.gem
-1c7f1ad8af670f4990373ebddb4d9fecd8f3c7d1 SOURCES/sinatra-1.4.7.gem
+3377f6140321523d7751bed3b2cc8a5201d8ec9f SOURCES/sinatra-1.4.8.gem
 83742328f21b684d6ce6c4747710c6e975b608e7 SOURCES/sinatra-contrib-1.4.7.gem
-49bee6e8614c1e991c1156150b0a2eaa28868f8d SOURCES/tilt-2.0.3.gem
-9c06bb646aba6330d4d85fe08415cdd2276fe918 SOURCES/HAM-logo.png
-062c9973625dced9a54a2f83a7baf7696ac37d60 SOURCES/favicon.ico
+f41d9747b29b38c1dc015bc71d5df691022d9716 SOURCES/tilt-2.0.6.gem
diff --git a/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch b/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch
deleted file mode 100644
index 4907fad..0000000
--- a/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch
+++ /dev/null
@@ -1,357 +0,0 @@
-From 1b6ed4d97198e7ca8c1fd5f76bfb8bfc95eeabdc Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 14 Sep 2016 09:37:06 +0200
-Subject: [PATCH] squash bz1158500 add support for utilization attri
-
-4ab84628f802 fix parsing of utilization attributes
-
-18d526f59679 support utilization on (non-cib) remote node
-
-f0b193a681e3 show error when show utilizat. on nonexistent node
-
-9907c123c225 web UI: fix setting utilization attributes
----
- .pylintrc                     |  2 +-
- pcs/node.py                   | 54 ++++++++++++++++++++++++++++++++++++-----
- pcs/resource.py               |  8 ++++---
- pcs/test/test_node.py         | 56 +++++++++++++++++++++++++++++++++++++++++++
- pcs/test/test_resource.py     | 18 ++++++++++++++
- pcs/test/test_utils.py        | 17 +++++++++----
- pcs/utils.py                  | 12 +++++++++-
- pcsd/public/js/nodes-ember.js |  4 ++--
- pcsd/remote.rb                |  2 +-
- 9 files changed, 155 insertions(+), 18 deletions(-)
-
-diff --git a/.pylintrc b/.pylintrc
-index 1dd6d5d..6101381 100644
---- a/.pylintrc
-+++ b/.pylintrc
-@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy
- 
- [FORMAT]
- # Maximum number of lines in a module
--max-module-lines=4584
-+max-module-lines=4616
- # Maximum number of characters on a single line.
- max-line-length=1291
- 
-diff --git a/pcs/node.py b/pcs/node.py
-index ed77d5d..729ea35 100644
---- a/pcs/node.py
-+++ b/pcs/node.py
-@@ -56,7 +56,10 @@ def node_cmd(argv):
-         elif len(argv) == 1:
-             print_node_utilization(argv.pop(0), filter_name=filter_name)
-         else:
--            set_node_utilization(argv.pop(0), argv)
-+            try:
-+                set_node_utilization(argv.pop(0), argv)
-+            except CmdLineInputError as e:
-+                utils.exit_on_cmdline_input_errror(e, "node", "utilization")
-     # pcs-to-pcsd use only
-     elif sub_cmd == "pacemaker-status":
-         node_pacemaker_status()
-@@ -150,17 +153,56 @@ def set_node_utilization(node, argv):
-     cib = utils.get_cib_dom()
-     node_el = utils.dom_get_node(cib, node)
-     if node_el is None:
--        utils.err("Unable to find a node: {0}".format(node))
-+        if utils.usefile:
-+            utils.err("Unable to find a node: {0}".format(node))
- 
--    utils.dom_update_utilization(
--        node_el, utils.convert_args_to_tuples(argv), "nodes-"
--    )
-+        for attrs in utils.getNodeAttributesFromPacemaker():
-+            if attrs.name == node and attrs.type == "remote":
-+                node_attrs = attrs
-+                break
-+        else:
-+            utils.err("Unable to find a node: {0}".format(node))
-+
-+        nodes_section_list = cib.getElementsByTagName("nodes")
-+        if len(nodes_section_list) == 0:
-+            utils.err("Unable to get nodes section of cib")
-+
-+        dom = nodes_section_list[0].ownerDocument
-+        node_el = dom.createElement("node")
-+        node_el.setAttribute("id", node_attrs.id)
-+        node_el.setAttribute("type", node_attrs.type)
-+        node_el.setAttribute("uname", node_attrs.name)
-+        nodes_section_list[0].appendChild(node_el)
-+
-+    utils.dom_update_utilization(node_el, prepare_options(argv), "nodes-")
-     utils.replace_cib_configuration(cib)
- 
- def print_node_utilization(filter_node=None, filter_name=None):
-     cib = utils.get_cib_dom()
-+
-+    node_element_list = cib.getElementsByTagName("node")
-+
-+
-+    if(
-+        filter_node
-+        and
-+        filter_node not in [
-+            node_element.getAttribute("uname")
-+            for node_element in node_element_list
-+        ]
-+        and (
-+            utils.usefile
-+            or
-+            filter_node not in [
-+                node_attrs.name for node_attrs
-+                in utils.getNodeAttributesFromPacemaker()
-+            ]
-+        )
-+    ):
-+        utils.err("Unable to find a node: {0}".format(filter_node))
-+
-     utilization = {}
--    for node_el in cib.getElementsByTagName("node"):
-+    for node_el in node_element_list:
-         node = node_el.getAttribute("uname")
-         if filter_node is not None and node != filter_node:
-             continue
-diff --git a/pcs/resource.py b/pcs/resource.py
-index 74adac6..046a826 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -191,7 +191,10 @@ def resource_cmd(argv):
-         elif len(argv) == 1:
-             print_resource_utilization(argv.pop(0))
-         else:
--            set_resource_utilization(argv.pop(0), argv)
-+            try:
-+                set_resource_utilization(argv.pop(0), argv)
-+            except CmdLineInputError as e:
-+                utils.exit_on_cmdline_input_errror(e, "resource", "utilization")
-     elif (sub_cmd == "get_resource_agent_info"):
-         get_resource_agent_info(argv)
-     else:
-@@ -2795,8 +2798,7 @@ def set_resource_utilization(resource_id, argv):
-     resource_el = utils.dom_get_resource(cib, resource_id)
-     if resource_el is None:
-         utils.err("Unable to find a resource: {0}".format(resource_id))
--
--    utils.dom_update_utilization(resource_el, utils.convert_args_to_tuples(argv))
-+    utils.dom_update_utilization(resource_el, prepare_options(argv))
-     utils.replace_cib_configuration(cib)
- 
- def print_resource_utilization(resource_id):
-diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
-index 9b45e07..137c7c7 100644
---- a/pcs/test/test_node.py
-+++ b/pcs/test/test_node.py
-@@ -7,7 +7,9 @@ from __future__ import (
- 
- import shutil
- from pcs.test.tools import pcs_unittest as unittest
-+from pcs.test.tools.pcs_unittest import mock
- 
-+from pcs import node
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-     ac,
-@@ -268,6 +270,20 @@ Node Utilization:
-         self.assertEqual(0, returnVal)
- 
-     def test_node_utilization_set_invalid(self):
-+        output, returnVal = pcs(temp_cib, "node utilization rh7-1 test")
-+        expected_out = """\
-+Error: missing value of 'test' option
-+"""
-+        ac(expected_out, output)
-+        self.assertEqual(1, returnVal)
-+
-+        output, returnVal = pcs(temp_cib, "node utilization rh7-1 =10")
-+        expected_out = """\
-+Error: missing key in '=10' option
-+"""
-+        ac(expected_out, output)
-+        self.assertEqual(1, returnVal)
-+
-         output, returnVal = pcs(temp_cib, "node utilization rh7-0 test=10")
-         expected_out = """\
- Error: Unable to find a node: rh7-0
-@@ -524,3 +540,43 @@ Node Attributes:
-             "node attribute rh7-1 missing= --force",
-             ""
-         )
-+
-+class SetNodeUtilizationTest(unittest.TestCase, AssertPcsMixin):
-+    def setUp(self):
-+        shutil.copy(empty_cib, temp_cib)
-+        self.pcs_runner = PcsRunner(temp_cib)
-+
-+    def test_refuse_non_option_attribute_parameter_among_options(self):
-+        self.assert_pcs_fail("node utilization rh7-1 net", [
-+            "Error: missing value of 'net' option",
-+        ])
-+
-+    def test_refuse_option_without_key(self):
-+        self.assert_pcs_fail("node utilization rh7-1 =1", [
-+            "Error: missing key in '=1' option",
-+        ])
-+
-+class PrintNodeUtilizationTest(unittest.TestCase, AssertPcsMixin):
-+    def setUp(self):
-+        shutil.copy(empty_cib, temp_cib)
-+        self.pcs_runner = PcsRunner(temp_cib)
-+
-+    @mock.patch("pcs.node.utils")
-+    def test_refuse_when_node_not_in_cib_and_is_not_remote(self, mock_utils):
-+        mock_cib = mock.MagicMock()
-+        mock_cib.getElementsByTagName = mock.Mock(return_value=[])
-+
-+        mock_utils.get_cib_dom = mock.Mock(return_value=mock_cib)
-+        mock_utils.usefile = False
-+        mock_utils.getNodeAttributesFromPacemaker = mock.Mock(return_value=[])
-+        mock_utils.err = mock.Mock(side_effect=SystemExit)
-+
-+        self.assertRaises(
-+            SystemExit,
-+            lambda: node.print_node_utilization("some")
-+        )
-+
-+    def test_refuse_when_node_not_in_mocked_cib(self):
-+        self.assert_pcs_fail("node utilization some_nonexistent_node", [
-+            "Error: Unable to find a node: some_nonexistent_node",
-+        ])
-diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
-index 87a7fa8..d32cfb4 100644
---- a/pcs/test/test_resource.py
-+++ b/pcs/test/test_resource.py
-@@ -4430,6 +4430,24 @@ Resource Utilization:
-         self.assertEqual(0, returnVal)
- 
-     def test_resource_utilization_set_invalid(self):
-+        output, returnVal = pcs(
-+            temp_large_cib, "resource utilization dummy test"
-+        )
-+        expected_out = """\
-+Error: missing value of 'test' option
-+"""
-+        ac(expected_out, output)
-+        self.assertEqual(1, returnVal)
-+
-+        output, returnVal = pcs(
-+            temp_large_cib, "resource utilization dummy =10"
-+        )
-+        expected_out = """\
-+Error: missing key in '=10' option
-+"""
-+        ac(expected_out, output)
-+        self.assertEqual(1, returnVal)
-+
-         output, returnVal = pcs(temp_large_cib, "resource utilization dummy0")
-         expected_out = """\
- Error: Unable to find a resource: dummy0
-diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
-index 252de30..c4c6d87 100644
---- a/pcs/test/test_utils.py
-+++ b/pcs/test/test_utils.py
-@@ -1400,12 +1400,12 @@ class UtilsTest(unittest.TestCase):
-         """).documentElement
-         self.assertRaises(
-             SystemExit,
--            utils.dom_update_utilization, el, [("name", "invalid_val")]
-+            utils.dom_update_utilization, el, {"name": "invalid_val"}
-         )
- 
-         self.assertRaises(
-             SystemExit,
--            utils.dom_update_utilization, el, [("name", "0.01")]
-+            utils.dom_update_utilization, el, {"name": "0.01"}
-         )
- 
-         sys.stderr = tmp_stderr
-@@ -1415,7 +1415,12 @@ class UtilsTest(unittest.TestCase):
-         <resource id="test_id"/>
-         """).documentElement
-         utils.dom_update_utilization(
--            el, [("name", ""), ("key", "-1"), ("keys", "90")]
-+            el,
-+            {
-+                "name": "",
-+                "key": "-1",
-+                "keys": "90",
-+            }
-         )
- 
-         self.assertEqual(len(dom_get_child_elements(el)), 1)
-@@ -1459,7 +1464,11 @@ class UtilsTest(unittest.TestCase):
-         </resource>
-         """).documentElement
-         utils.dom_update_utilization(
--            el, [("key", "100"), ("keys", "")]
-+            el,
-+            {
-+                "key": "100",
-+                "keys": "",
-+            }
-         )
- 
-         u = dom_get_child_elements(el)[0]
-diff --git a/pcs/utils.py b/pcs/utils.py
-index a7ff7ca..d5b6dcf 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -472,6 +472,16 @@ def getNodesFromPacemaker():
-     except LibraryError as e:
-         process_library_reports(e.args)
- 
-+def getNodeAttributesFromPacemaker():
-+    try:
-+        return [
-+            node.attrs
-+            for node in ClusterState(getClusterStateXml()).node_section.nodes
-+        ]
-+    except LibraryError as e:
-+        process_library_reports(e.args)
-+
-+
- def hasCorosyncConf(conf=None):
-     if not conf:
-         if is_rhel6():
-@@ -2487,7 +2497,7 @@ def dom_update_utilization(dom_element, attributes, id_prefix=""):
-         id_prefix + dom_element.getAttribute("id") + "-utilization"
-     )
- 
--    for name, value in attributes:
-+    for name, value in sorted(attributes.items()):
-         if value != "" and not is_int(value):
-             err(
-                 "Value of utilization attribute must be integer: "
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index c650fe6..19caf14 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -500,9 +500,9 @@ Pcs.UtilizationTableComponent = Ember.Component.extend({
-     },
-     add: function(form_id) {
-       var id = "#" + form_id;
--      var name = $(id + " input[name='new_utilization_name']").val();
-+      var name = $(id + " input[name='new_utilization_name']").val().trim();
-       if (name == "") {
--        return;
-+        alert("Name of utilization attribute should be non-empty string.");
-       }
-       var value = $(id + " input[name='new_utilization_value']").val().trim();
-       if (!is_integer(value)) {
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index e467d0a..7dc7951 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -2240,7 +2240,7 @@ def set_node_utilization(params, reqest, auth_user)
- 
-   if retval != 0
-     return [400, "Unable to set utilization '#{name}=#{value}' for node " +
--      "'#{res_id}': #{stderr.join('')}"
-+      "'#{node}': #{stderr.join('')}"
-     ]
-   end
-   return 200
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch b/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch
deleted file mode 100644
index 4f6eaaf..0000000
--- a/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch
+++ /dev/null
@@ -1,10043 +0,0 @@
-From db8643c4489274faee0bba008846a63c2ab63f46 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Wed, 15 Jun 2016 14:52:39 +0200
-Subject: [PATCH] bz1158805-01-add support for qdevice-qnetd provided by
- corosync
-
----
- pcs/cli/common/lib_wrapper.py                     |   10 +
- pcs/cluster.py                                    |  119 +-
- pcs/common/report_codes.py                        |   31 +-
- pcs/lib/commands/qdevice.py                       |   88 +-
- pcs/lib/commands/quorum.py                        |  217 +-
- pcs/lib/corosync/config_facade.py                 |   98 +-
- pcs/lib/corosync/live.py                          |   15 +
- pcs/lib/corosync/qdevice_client.py                |   93 +
- pcs/lib/corosync/qdevice_net.py                   |  314 ++-
- pcs/lib/env.py                                    |   11 +-
- pcs/lib/errors.py                                 |    6 +-
- pcs/lib/external.py                               |   44 +-
- pcs/lib/nodes_task.py                             |   69 +-
- pcs/lib/reports.py                                |  225 +-
- pcs/pcs.8                                         |   27 +-
- pcs/qdevice.py                                    |   71 +
- pcs/quorum.py                                     |   34 +-
- pcs/settings_default.py                           |    6 +-
- pcs/test/resources/qdevice-certs/qnetd-cacert.crt |    1 +
- pcs/test/test_lib_commands_qdevice.py             |  255 ++
- pcs/test/test_lib_commands_quorum.py              | 1109 ++++++++-
- pcs/test/test_lib_corosync_config_facade.py       |  367 ++-
- pcs/test/test_lib_corosync_live.py                |   62 +-
- pcs/test/test_lib_corosync_qdevice_client.py      |   60 +
- pcs/test/test_lib_corosync_qdevice_net.py         |  965 +++++++-
- pcs/test/test_lib_env.py                          |  142 +-
- pcs/test/test_lib_external.py                     |  126 +-
- pcs/test/test_lib_nodes_task.py                   |  168 +-
- pcs/test/test_quorum.py                           |    9 +-
- pcs/test/test_utils.py                            | 2628 +++++++++++----------
- pcs/usage.py                                      |   53 +-
- pcs/utils.py                                      |  147 +-
- pcsd/pcs.rb                                       |   17 +
- pcsd/remote.rb                                    |  163 +-
- pcsd/settings.rb                                  |    6 +
- pcsd/settings.rb.debian                           |   10 +-
- 36 files changed, 6170 insertions(+), 1596 deletions(-)
- create mode 100644 pcs/lib/corosync/qdevice_client.py
- create mode 100644 pcs/test/resources/qdevice-certs/qnetd-cacert.crt
- create mode 100644 pcs/test/test_lib_corosync_qdevice_client.py
-
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index 2ba5602..2dd5810 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -117,6 +117,8 @@ def load_module(env, middleware_factory, name):
-                 "get_config": quorum.get_config,
-                 "remove_device": quorum.remove_device,
-                 "set_options": quorum.set_options,
-+                "status": quorum.status_text,
-+                "status_device": quorum.status_device_text,
-                 "update_device": quorum.update_device,
-             }
-         )
-@@ -125,6 +127,7 @@ def load_module(env, middleware_factory, name):
-             env,
-             middleware.build(),
-             {
-+                "status": qdevice.qdevice_status_text,
-                 "setup": qdevice.qdevice_setup,
-                 "destroy": qdevice.qdevice_destroy,
-                 "start": qdevice.qdevice_start,
-@@ -132,6 +135,13 @@ def load_module(env, middleware_factory, name):
-                 "kill": qdevice.qdevice_kill,
-                 "enable": qdevice.qdevice_enable,
-                 "disable": qdevice.qdevice_disable,
-+                # following commands are internal use only, called from pcsd
-+                "client_net_setup": qdevice.client_net_setup,
-+                "client_net_import_certificate":
-+                    qdevice.client_net_import_certificate,
-+                "client_net_destroy": qdevice.client_net_destroy,
-+                "sign_net_cert_request":
-+                    qdevice.qdevice_net_sign_certificate_request,
-             }
-         )
-     if name == "sbd":
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 002b5c5..988ab75 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -36,23 +36,29 @@ from pcs import (
- )
- from pcs.utils import parallel_for_nodes
- from pcs.common import report_codes
-+from pcs.cli.common.reports import process_library_reports
- from pcs.lib import (
-     pacemaker as lib_pacemaker,
-     sbd as lib_sbd,
-     reports as lib_reports,
- )
--from pcs.lib.tools import environment_file_to_dict
-+from pcs.lib.commands.quorum import _add_device_model_net
-+from pcs.lib.corosync import (
-+    config_parser as corosync_conf_utils,
-+    qdevice_net,
-+)
-+from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
-+from pcs.lib.errors import (
-+    LibraryError,
-+    ReportItemSeverity,
-+)
- from pcs.lib.external import (
-     disable_service,
-     NodeCommunicationException,
-     node_communicator_exception_to_report_item,
- )
- from pcs.lib.node import NodeAddresses
--from pcs.lib.errors import (
--    LibraryError,
--    ReportItemSeverity,
--)
--from pcs.lib.corosync import config_parser as corosync_conf_utils
-+from pcs.lib.tools import environment_file_to_dict
- 
- def cluster_cmd(argv):
-     if len(argv) == 0:
-@@ -288,7 +294,7 @@ def cluster_setup(argv):
-         )
-     if udpu_rrp and "rrp_mode" not in options["transport_options"]:
-         options["transport_options"]["rrp_mode"] = "passive"
--    utils.process_library_reports(messages)
-+    process_library_reports(messages)
- 
-     # prepare config file
-     if is_rhel6:
-@@ -306,7 +312,7 @@ def cluster_setup(argv):
-             options["totem_options"],
-             options["quorum_options"]
-         )
--    utils.process_library_reports(messages)
-+    process_library_reports(messages)
- 
-     # setup on the local node
-     if "--local" in utils.pcs_options:
-@@ -870,6 +876,7 @@ def start_cluster(argv):
-         return
- 
-     print("Starting Cluster...")
-+    service_list = []
-     if utils.is_rhel6():
- #   Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0
-         retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]')
-@@ -882,14 +889,15 @@ def start_cluster(argv):
-             print(output)
-             utils.err("unable to start cman")
-     else:
--        output, retval = utils.run(["service", "corosync","start"])
-+        service_list.append("corosync")
-+        if utils.need_to_handle_qdevice_service():
-+            service_list.append("corosync-qdevice")
-+    service_list.append("pacemaker")
-+    for service in service_list:
-+        output, retval = utils.run(["service", service, "start"])
-         if retval != 0:
-             print(output)
--            utils.err("unable to start corosync")
--    output, retval = utils.run(["service", "pacemaker", "start"])
--    if retval != 0:
--        print(output)
--        utils.err("unable to start pacemaker")
-+            utils.err("unable to start {0}".format(service))
-     if wait:
-         wait_for_nodes_started([], wait_timeout)
- 
-@@ -1035,14 +1043,20 @@ def enable_cluster(argv):
-         enable_cluster_nodes(argv)
-         return
- 
--    utils.enableServices()
-+    try:
-+        utils.enableServices()
-+    except LibraryError as e:
-+        process_library_reports(e.args)
- 
- def disable_cluster(argv):
-     if len(argv) > 0:
-         disable_cluster_nodes(argv)
-         return
- 
--    utils.disableServices()
-+    try:
-+        utils.disableServices()
-+    except LibraryError as e:
-+        process_library_reports(e.args)
- 
- def enable_cluster_all():
-     enable_cluster_nodes(utils.getNodesFromCorosyncConf())
-@@ -1132,13 +1146,18 @@ def stop_cluster_corosync():
-             utils.err("unable to stop cman")
-     else:
-         print("Stopping Cluster (corosync)...")
--        output, retval = utils.run(["service", "corosync","stop"])
--        if retval != 0:
--            print(output)
--            utils.err("unable to stop corosync")
-+        service_list = []
-+        if utils.need_to_handle_qdevice_service():
-+            service_list.append("corosync-qdevice")
-+        service_list.append("corosync")
-+        for service in service_list:
-+            output, retval = utils.run(["service", service, "stop"])
-+            if retval != 0:
-+                print(output)
-+                utils.err("unable to stop {0}".format(service))
- 
- def kill_cluster(argv):
--    daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync"]
-+    daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"]
-     dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons)
- #    if dummy_retval != 0:
- #        print "Error: unable to execute killall -9"
-@@ -1321,19 +1340,16 @@ def cluster_node(argv):
-                 "cluster is not configured for RRP, "
-                 "you must not specify ring 1 address for the node"
-             )
--        utils.check_qdevice_algorithm_and_running_cluster(
--            utils.getCorosyncConf(), add=True
--        )
-         corosync_conf = None
-         (canAdd, error) =  utils.canAddNodeToCluster(node0)
-         if not canAdd:
-             utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
- 
-+        lib_env = utils.get_lib_env()
-+        report_processor = lib_env.report_processor
-+        node_communicator = lib_env.node_communicator()
-+        node_addr = NodeAddresses(node0, node1)
-         try:
--            node_addr = NodeAddresses(node0, node1)
--            lib_env = utils.get_lib_env()
--            report_processor = lib_env.report_processor
--            node_communicator = lib_env.node_communicator()
-             if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
-                 if "--watchdog" not in utils.pcs_options:
-                     watchdog = settings.sbd_watchdog_default
-@@ -1367,9 +1383,9 @@ def cluster_node(argv):
-                     report_processor, node_communicator, node_addr
-                 )
-         except LibraryError as e:
--            utils.process_library_reports(e.args)
-+            process_library_reports(e.args)
-         except NodeCommunicationException as e:
--            utils.process_library_reports(
-+            process_library_reports(
-                 [node_communicator_exception_to_report_item(e)]
-             )
- 
-@@ -1383,6 +1399,8 @@ def cluster_node(argv):
-             else:
-                 print("%s: Corosync updated" % my_node)
-                 corosync_conf = output
-+        # corosync.conf must be reloaded before the new node is started
-+        output, retval = utils.reloadCorosync()
-         if corosync_conf != None:
-             # send local cluster pcsd configs to the new node
-             # may be used for sending corosync config as well in future
-@@ -1406,6 +1424,25 @@ def cluster_node(argv):
-                 except:
-                     utils.err('Unable to communicate with pcsd')
- 
-+            # set qdevice-net certificates if needed
-+            if not utils.is_rhel6():
-+                try:
-+                    conf_facade = corosync_conf_facade.from_string(
-+                        corosync_conf
-+                    )
-+                    qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
-+                    if qdevice_model == "net":
-+                        _add_device_model_net(
-+                            lib_env,
-+                            qdevice_model_options["host"],
-+                            conf_facade.get_cluster_name(),
-+                            [node_addr],
-+                            skip_offline_nodes=False
-+                        )
-+                except LibraryError as e:
-+                    process_library_reports(e.args)
-+
-+            print("Setting up corosync...")
-             utils.setCorosyncConfig(node0, corosync_conf)
-             if "--enable" in utils.pcs_options:
-                 retval, err = utils.enableCluster(node0)
-@@ -1421,7 +1458,6 @@ def cluster_node(argv):
-             pcsd.pcsd_sync_certs([node0], exit_after_error=False)
-         else:
-             utils.err("Unable to update any nodes")
--        output, retval = utils.reloadCorosync()
-         if utils.is_cman_with_udpu_transport():
-             print("Warning: Using udpu transport on a CMAN cluster, "
-                 + "cluster restart is required to apply node addition")
-@@ -1433,9 +1469,6 @@ def cluster_node(argv):
-             utils.err(
-                 "node '%s' does not appear to exist in configuration" % node0
-             )
--        utils.check_qdevice_algorithm_and_running_cluster(
--            utils.getCorosyncConf(), add=False
--        )
-         if "--force" not in utils.pcs_options:
-             retval, data = utils.get_remote_quorumtool_output(node0)
-             if retval != 0:
-@@ -1697,10 +1730,18 @@ def cluster_destroy(argv):
-     else:
-         print("Shutting down pacemaker/corosync services...")
-         os.system("service pacemaker stop")
-+        # returns error if qdevice is not running, it is safe to ignore it
-+        # since we want it not to be running
-+        os.system("service corosync-qdevice stop")
-         os.system("service corosync stop")
-         print("Killing any remaining services...")
--        os.system("killall -q -9 corosync aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
--        utils.disableServices()
-+        os.system("killall -q -9 corosync corosync-qdevice aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
-+        try:
-+            utils.disableServices()
-+        except:
-+            # previously errors were suppressed in here, let's keep it that way
-+            # for now
-+            pass
-         try:
-             disable_service(utils.cmd_runner(), "sbd")
-         except:
-@@ -1716,6 +1757,12 @@ def cluster_destroy(argv):
-                 "pe*.bz2","cib.*"]
-         for name in state_files:
-             os.system("find /var/lib -name '"+name+"' -exec rm -f \{\} \;")
-+        try:
-+            qdevice_net.client_destroy()
-+        except:
-+            # errors from deleting other files are suppressed as well
-+            # we do not want to fail if qdevice was not set up
-+            pass
- 
- def cluster_verify(argv):
-     nofilename = True
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index bda982a..afe0554 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -45,6 +45,8 @@ COROSYNC_CONFIG_RELOAD_ERROR = "COROSYNC_CONFIG_RELOAD_ERROR"
- COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED"
- COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR"
- COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE"
-+COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE"
-+COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
- COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
- CRM_MON_ERROR = "CRM_MON_ERROR"
- DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
-@@ -62,11 +64,11 @@ INVALID_SCORE = "INVALID_SCORE"
- INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE"
- MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS"
- NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL"
--NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR",
--NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED",
--NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED",
--NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT",
--NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND",
-+NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR"
-+NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED"
-+NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED"
-+NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT"
-+NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND"
- NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED"
- NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED"
- NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED"
-@@ -74,16 +76,25 @@ NODE_NOT_FOUND = "NODE_NOT_FOUND"
- NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH'
- OMITTING_NODE = "OMITTING_NODE"
- PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND"
--PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE",
--PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF",
--PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE",
-+PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE"
-+PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF"
-+PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE"
- QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED"
- QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED"
-+QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = "QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE"
-+QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED = "QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED"
-+QDEVICE_CERTIFICATE_REMOVAL_STARTED = "QDEVICE_CERTIFICATE_REMOVAL_STARTED"
-+QDEVICE_CERTIFICATE_REMOVED_FROM_NODE = "QDEVICE_CERTIFICATE_REMOVED_FROM_NODE"
-+QDEVICE_CERTIFICATE_IMPORT_ERROR = "QDEVICE_CERTIFICATE_IMPORT_ERROR"
-+QDEVICE_CERTIFICATE_SIGN_ERROR = "QDEVICE_CERTIFICATE_SIGN_ERROR"
- QDEVICE_DESTROY_ERROR = "QDEVICE_DESTROY_ERROR"
- QDEVICE_DESTROY_SUCCESS = "QDEVICE_DESTROY_SUCCESS"
-+QDEVICE_GET_STATUS_ERROR = "QDEVICE_GET_STATUS_ERROR"
- QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR"
- QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS"
- QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED"
-+QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED"
-+QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED"
- QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED"
- REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING"
- RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
-@@ -106,12 +117,16 @@ SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED"
- SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED"
- SBD_NOT_ENABLED = "SBD_NOT_ENABLED"
- SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR"
-+SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED"
- SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS"
- SERVICE_ENABLE_ERROR = "SERVICE_ENABLE_ERROR"
-+SERVICE_ENABLE_STARTED = "SERVICE_ENABLE_STARTED"
-+SERVICE_ENABLE_SKIPPED = "SERVICE_ENABLE_SKIPPED"
- SERVICE_ENABLE_SUCCESS = "SERVICE_ENABLE_SUCCESS"
- SERVICE_KILL_ERROR = "SERVICE_KILL_ERROR"
- SERVICE_KILL_SUCCESS = "SERVICE_KILL_SUCCESS"
- SERVICE_START_ERROR = "SERVICE_START_ERROR"
-+SERVICE_START_SKIPPED = "SERVICE_START_SKIPPED"
- SERVICE_START_STARTED = "SERVICE_START_STARTED"
- SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS"
- SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR"
-diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
-index c300a4c..1d1d85f 100644
---- a/pcs/lib/commands/qdevice.py
-+++ b/pcs/lib/commands/qdevice.py
-@@ -5,6 +5,9 @@ from __future__ import (
-     unicode_literals,
- )
- 
-+import base64
-+import binascii
-+
- from pcs.lib import external, reports
- from pcs.lib.corosync import qdevice_net
- from pcs.lib.errors import LibraryError
-@@ -31,7 +34,7 @@ def qdevice_setup(lib_env, model, enable, start):
- def qdevice_destroy(lib_env, model):
-     """
-     Stop and disable qdevice on local host and remove its configuration
--    string model qdevice model to initialize
-+    string model qdevice model to destroy
-     """
-     _ensure_not_cman(lib_env)
-     _check_model(model)
-@@ -40,6 +43,22 @@ def qdevice_destroy(lib_env, model):
-     qdevice_net.qdevice_destroy()
-     lib_env.report_processor.process(reports.qdevice_destroy_success(model))
- 
-+def qdevice_status_text(lib_env, model, verbose=False, cluster=None):
-+    """
-+    Get runtime status of a quorum device in plain text
-+    string model qdevice model to query
-+    bool verbose get more detailed output
-+    string cluster show information only about specified cluster
-+    """
-+    _ensure_not_cman(lib_env)
-+    _check_model(model)
-+    runner = lib_env.cmd_runner()
-+    return (
-+        qdevice_net.qdevice_status_generic_text(runner, verbose)
-+        +
-+        qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose)
-+    )
-+
- def qdevice_enable(lib_env, model):
-     """
-     make qdevice start automatically on boot on local host
-@@ -80,6 +99,73 @@ def qdevice_kill(lib_env, model):
-     _check_model(model)
-     _service_kill(lib_env, qdevice_net.qdevice_kill)
- 
-+def qdevice_net_sign_certificate_request(
-+    lib_env, certificate_request, cluster_name
-+):
-+    """
-+    Sign node certificate request by qnetd CA
-+    string certificate_request base64 encoded certificate request
-+    string cluster_name name of the cluster to which qdevice is being added
-+    """
-+    _ensure_not_cman(lib_env)
-+    try:
-+        certificate_request_data = base64.b64decode(certificate_request)
-+    except (TypeError, binascii.Error):
-+        raise LibraryError(reports.invalid_option_value(
-+            "qnetd certificate request",
-+            certificate_request,
-+            ["base64 encoded certificate"]
-+        ))
-+    return base64.b64encode(
-+        qdevice_net.qdevice_sign_certificate_request(
-+            lib_env.cmd_runner(),
-+            certificate_request_data,
-+            cluster_name
-+        )
-+    )
-+
-+def client_net_setup(lib_env, ca_certificate):
-+    """
-+    Intialize qdevice net client on local host
-+    ca_certificate base64 encoded qnetd CA certificate
-+    """
-+    _ensure_not_cman(lib_env)
-+    try:
-+        ca_certificate_data = base64.b64decode(ca_certificate)
-+    except (TypeError, binascii.Error):
-+        raise LibraryError(reports.invalid_option_value(
-+            "qnetd CA certificate",
-+            ca_certificate,
-+            ["base64 encoded certificate"]
-+        ))
-+    qdevice_net.client_setup(lib_env.cmd_runner(), ca_certificate_data)
-+
-+def client_net_import_certificate(lib_env, certificate):
-+    """
-+    Import qnetd client certificate to local node certificate storage
-+    certificate base64 encoded qnetd client certificate
-+    """
-+    _ensure_not_cman(lib_env)
-+    try:
-+        certificate_data = base64.b64decode(certificate)
-+    except (TypeError, binascii.Error):
-+        raise LibraryError(reports.invalid_option_value(
-+            "qnetd client certificate",
-+            certificate,
-+            ["base64 encoded certificate"]
-+        ))
-+    qdevice_net.client_import_certificate_and_key(
-+        lib_env.cmd_runner(),
-+        certificate_data
-+    )
-+
-+def client_net_destroy(lib_env):
-+    """
-+    delete qdevice client config files on local host
-+    """
-+    _ensure_not_cman(lib_env)
-+    qdevice_net.client_destroy()
-+
- def _ensure_not_cman(lib_env):
-     if lib_env.is_cman_cluster:
-         raise LibraryError(reports.cman_unsupported_command())
-diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
-index 1ee5411..aa00bbd 100644
---- a/pcs/lib/commands/quorum.py
-+++ b/pcs/lib/commands/quorum.py
-@@ -5,9 +5,18 @@ from __future__ import (
-     unicode_literals,
- )
- 
--
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError
-+from pcs.lib.corosync import (
-+    live as corosync_live,
-+    qdevice_net,
-+    qdevice_client
-+)
-+from pcs.lib.external import (
-+    NodeCommunicationException,
-+    node_communicator_exception_to_report_item,
-+    parallel_nodes_communication_helper,
-+)
- 
- 
- def get_config(lib_env):
-@@ -42,6 +51,21 @@ def set_options(lib_env, options, skip_offline_nodes=False):
-     cfg.set_quorum_options(lib_env.report_processor, options)
-     lib_env.push_corosync_conf(cfg, skip_offline_nodes)
- 
-+def status_text(lib_env):
-+    """
-+    Get quorum runtime status in plain text
-+    """
-+    __ensure_not_cman(lib_env)
-+    return corosync_live.get_quorum_status_text(lib_env.cmd_runner())
-+
-+def status_device_text(lib_env, verbose=False):
-+    """
-+    Get quorum device client runtime status in plain text
-+    bool verbose get more detailed output
-+    """
-+    __ensure_not_cman(lib_env)
-+    return qdevice_client.get_status_text(lib_env.cmd_runner(), verbose)
-+
- def add_device(
-     lib_env, model, model_options, generic_options, force_model=False,
-     force_options=False, skip_offline_nodes=False
-@@ -58,6 +82,8 @@ def add_device(
-     __ensure_not_cman(lib_env)
- 
-     cfg = lib_env.get_corosync_conf()
-+    # Try adding qdevice to corosync.conf. This validates all the options and
-+    # makes sure qdevice is not defined in corosync.conf yet.
-     cfg.add_quorum_device(
-         lib_env.report_processor,
-         model,
-@@ -66,9 +92,131 @@ def add_device(
-         force_model,
-         force_options
-     )
--    # TODO validation, verification, certificates, etc.
-+
-+    # First setup certificates for qdevice, then send corosync.conf to nodes.
-+    # If anything fails, nodes will not have corosync.conf with qdevice in it,
-+    # so there is no effect on the cluster.
-+    if lib_env.is_corosync_conf_live:
-+        # do model specific configuration
-+        # if model is not known to pcs and was forced, do not configure antyhing
-+        # else but corosync.conf, as we do not know what to do anyways
-+        if model == "net":
-+            _add_device_model_net(
-+                lib_env,
-+                # we are sure it's there, it was validated in add_quorum_device
-+                model_options["host"],
-+                cfg.get_cluster_name(),
-+                cfg.get_nodes(),
-+                skip_offline_nodes
-+            )
-+
-+        lib_env.report_processor.process(
-+            reports.service_enable_started("corosync-qdevice")
-+        )
-+        communicator = lib_env.node_communicator()
-+        parallel_nodes_communication_helper(
-+            qdevice_client.remote_client_enable,
-+            [
-+                [(lib_env.report_processor, communicator, node), {}]
-+                for node in cfg.get_nodes()
-+            ],
-+            lib_env.report_processor,
-+            skip_offline_nodes
-+        )
-+
-+    # everything set up, it's safe to tell the nodes to use qdevice
-     lib_env.push_corosync_conf(cfg, skip_offline_nodes)
- 
-+    # Now, when corosync.conf has been reloaded, we can start qdevice service.
-+    if lib_env.is_corosync_conf_live:
-+        lib_env.report_processor.process(
-+            reports.service_start_started("corosync-qdevice")
-+        )
-+        communicator = lib_env.node_communicator()
-+        parallel_nodes_communication_helper(
-+            qdevice_client.remote_client_start,
-+            [
-+                [(lib_env.report_processor, communicator, node), {}]
-+                for node in cfg.get_nodes()
-+            ],
-+            lib_env.report_processor,
-+            skip_offline_nodes
-+        )
-+
-+def _add_device_model_net(
-+    lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes
-+):
-+    """
-+    setup cluster nodes for using qdevice model net
-+    string qnetd_host address of qdevice provider (qnetd host)
-+    string cluster_name name of the cluster to which qdevice is being added
-+    NodeAddressesList cluster_nodes list of cluster nodes addresses
-+    bool skip_offline_nodes continue even if not all nodes are accessible
-+    """
-+    communicator = lib_env.node_communicator()
-+    runner = lib_env.cmd_runner()
-+    reporter = lib_env.report_processor
-+
-+    reporter.process(
-+        reports.qdevice_certificate_distribution_started()
-+    )
-+    # get qnetd CA certificate
-+    try:
-+        qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate(
-+            communicator,
-+            qnetd_host
-+        )
-+    except NodeCommunicationException as e:
-+        raise LibraryError(
-+            node_communicator_exception_to_report_item(e)
-+        )
-+    # init certificate storage on all nodes
-+    parallel_nodes_communication_helper(
-+        qdevice_net.remote_client_setup,
-+        [
-+            ((communicator, node, qnetd_ca_cert), {})
-+            for node in cluster_nodes
-+        ],
-+        reporter,
-+        skip_offline_nodes
-+    )
-+    # create client certificate request
-+    cert_request = qdevice_net.client_generate_certificate_request(
-+        runner,
-+        cluster_name
-+    )
-+    # sign the request on qnetd host
-+    try:
-+        signed_certificate = qdevice_net.remote_sign_certificate_request(
-+            communicator,
-+            qnetd_host,
-+            cert_request,
-+            cluster_name
-+        )
-+    except NodeCommunicationException as e:
-+        raise LibraryError(
-+            node_communicator_exception_to_report_item(e)
-+        )
-+    # transform the signed certificate to pk12 format which can sent to nodes
-+    pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate)
-+    # distribute final certificate to nodes
-+    def do_and_report(reporter, communicator, node, pk12):
-+        qdevice_net.remote_client_import_certificate_and_key(
-+            communicator, node, pk12
-+        )
-+        reporter.process(
-+            reports.qdevice_certificate_accepted_by_node(node.label)
-+        )
-+    parallel_nodes_communication_helper(
-+        do_and_report,
-+        [
-+            ((reporter, communicator, node, pk12), {})
-+            for node in cluster_nodes
-+        ],
-+        reporter,
-+        skip_offline_nodes
-+    )
-+
- def update_device(
-     lib_env, model_options, generic_options, force_options=False,
-     skip_offline_nodes=False
-@@ -98,9 +246,74 @@ def remove_device(lib_env, skip_offline_nodes=False):
-     __ensure_not_cman(lib_env)
- 
-     cfg = lib_env.get_corosync_conf()
-+    model, dummy_options, dummy_options = cfg.get_quorum_device_settings()
-     cfg.remove_quorum_device()
-     lib_env.push_corosync_conf(cfg, skip_offline_nodes)
- 
-+    if lib_env.is_corosync_conf_live:
-+        # disable qdevice
-+        lib_env.report_processor.process(
-+            reports.service_disable_started("corosync-qdevice")
-+        )
-+        communicator = lib_env.node_communicator()
-+        parallel_nodes_communication_helper(
-+            qdevice_client.remote_client_disable,
-+            [
-+                [(lib_env.report_processor, communicator, node), {}]
-+                for node in cfg.get_nodes()
-+            ],
-+            lib_env.report_processor,
-+            skip_offline_nodes
-+        )
-+        # stop qdevice
-+        lib_env.report_processor.process(
-+            reports.service_stop_started("corosync-qdevice")
-+        )
-+        communicator = lib_env.node_communicator()
-+        parallel_nodes_communication_helper(
-+            qdevice_client.remote_client_stop,
-+            [
-+                [(lib_env.report_processor, communicator, node), {}]
-+                for node in cfg.get_nodes()
-+            ],
-+            lib_env.report_processor,
-+            skip_offline_nodes
-+        )
-+        # handle model specific configuration
-+        if model == "net":
-+            _remove_device_model_net(
-+                lib_env,
-+                cfg.get_nodes(),
-+                skip_offline_nodes
-+            )
-+
-+def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
-+    """
-+    remove configuration used by qdevice model net
-+    NodeAddressesList cluster_nodes list of cluster nodes addresses
-+    bool skip_offline_nodes continue even if not all nodes are accessible
-+    """
-+    reporter = lib_env.report_processor
-+    communicator = lib_env.node_communicator()
-+
-+    reporter.process(
-+        reports.qdevice_certificate_removal_started()
-+    )
-+    def do_and_report(reporter, communicator, node):
-+        qdevice_net.remote_client_destroy(communicator, node)
-+        reporter.process(
-+            reports.qdevice_certificate_removed_from_node(node.label)
-+        )
-+    parallel_nodes_communication_helper(
-+        do_and_report,
-+        [
-+            [(reporter, communicator, node), {}]
-+            for node in cluster_nodes
-+        ],
-+        lib_env.report_processor,
-+        skip_offline_nodes
-+    )
-+
- def __ensure_not_cman(lib_env):
-     if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster:
-         raise LibraryError(reports.cman_unsupported_command())
-diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
-index 5a486ca..600a89b 100644
---- a/pcs/lib/corosync/config_facade.py
-+++ b/pcs/lib/corosync/config_facade.py
-@@ -22,6 +22,12 @@ class ConfigFacade(object):
-         "last_man_standing_window",
-         "wait_for_all",
-     )
-+    QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = (
-+        "auto_tie_breaker",
-+        "last_man_standing",
-+        "last_man_standing_window",
-+    )
-+
- 
-     @classmethod
-     def from_string(cls, config_string):
-@@ -52,6 +58,8 @@ class ConfigFacade(object):
-         self._config = parsed_config
-         # set to True if changes cannot be applied on running cluster
-         self._need_stopped_cluster = False
-+        # set to True if qdevice reload is required to apply changes
-+        self._need_qdevice_reload = False
- 
-     @property
-     def config(self):
-@@ -61,6 +69,17 @@ class ConfigFacade(object):
-     def need_stopped_cluster(self):
-         return self._need_stopped_cluster
- 
-+    @property
-+    def need_qdevice_reload(self):
-+        return self._need_qdevice_reload
-+
-+    def get_cluster_name(self):
-+        cluster_name = ""
-+        for totem in self.config.get_sections("totem"):
-+            for attrs in totem.get_attributes("cluster_name"):
-+                cluster_name = attrs[1]
-+        return cluster_name
-+
-     def get_nodes(self):
-         """
-         Get all defined nodes
-@@ -112,8 +131,9 @@ class ConfigFacade(object):
- 
-     def __validate_quorum_options(self, options):
-         report_items = []
-+        has_qdevice = self.has_quorum_device()
-+        qdevice_incompatible_options = []
-         for name, value in sorted(options.items()):
--
-             allowed_names = self.__class__.QUORUM_OPTIONS
-             if name not in allowed_names:
-                 report_items.append(
-@@ -124,6 +144,13 @@ class ConfigFacade(object):
-             if value == "":
-                 continue
- 
-+            if (
-+                has_qdevice
-+                and
-+                name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE
-+            ):
-+                qdevice_incompatible_options.append(name)
-+
-             if name == "last_man_standing_window":
-                 if not value.isdigit():
-                     report_items.append(reports.invalid_option_value(
-@@ -137,6 +164,13 @@ class ConfigFacade(object):
-                         name, value, allowed_values
-                     ))
- 
-+        if qdevice_incompatible_options:
-+            report_items.append(
-+                reports.corosync_options_incompatible_with_qdevice(
-+                    qdevice_incompatible_options
-+                )
-+            )
-+
-         return report_items
- 
-     def has_quorum_device(self):
-@@ -201,13 +235,13 @@ class ConfigFacade(object):
-                 force=force_options
-             )
-         )
-+
-         # configuration cleanup
--        remove_need_stopped_cluster = {
--            "auto_tie_breaker": "",
--            "last_man_standing": "",
--            "last_man_standing_window": "",
--        }
--        need_stopped_cluster = False
-+        remove_need_stopped_cluster = dict([
-+            (name, "")
-+            for name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE
-+        ])
-+        # remove old device settings
-         quorum_section_list = self.__ensure_section(self.config, "quorum")
-         for quorum in quorum_section_list:
-             for device in quorum.get_sections("device"):
-@@ -218,13 +252,19 @@ class ConfigFacade(object):
-                     and
-                     value not in ["", "0"]
-                 ):
--                    need_stopped_cluster = True
-+                    self._need_stopped_cluster = True
-+        # remove conflicting quorum options
-         attrs_to_remove = {
-             "allow_downscale": "",
-             "two_node": "",
-         }
-         attrs_to_remove.update(remove_need_stopped_cluster)
-         self.__set_section_options(quorum_section_list, attrs_to_remove)
-+        # remove nodes' votes
-+        for nodelist in self.config.get_sections("nodelist"):
-+            for node in nodelist.get_sections("node"):
-+                node.del_attributes_by_name("quorum_votes")
-+
-         # add new configuration
-         quorum = quorum_section_list[-1]
-         new_device = config_parser.Section("device")
-@@ -234,12 +274,9 @@ class ConfigFacade(object):
-         new_model = config_parser.Section(model)
-         self.__set_section_options([new_model], model_options)
-         new_device.add_section(new_model)
-+        self.__update_qdevice_votes()
-         self.__update_two_node()
-         self.__remove_empty_sections(self.config)
--        # update_two_node sets self._need_stopped_cluster when changing an
--        # algorithm lms <-> 2nodelms. We don't care about that, it's not really
--        # a change, as there was no qdevice before. So we override it.
--        self._need_stopped_cluster = need_stopped_cluster
- 
-     def update_quorum_device(
-         self, report_processor, model_options, generic_options,
-@@ -281,9 +318,10 @@ class ConfigFacade(object):
-                 model_sections.extend(device.get_sections(model))
-         self.__set_section_options(device_sections, generic_options)
-         self.__set_section_options(model_sections, model_options)
-+        self.__update_qdevice_votes()
-         self.__update_two_node()
-         self.__remove_empty_sections(self.config)
--        self._need_stopped_cluster = True
-+        self._need_qdevice_reload = True
- 
-     def remove_quorum_device(self):
-         """
-@@ -369,7 +407,7 @@ class ConfigFacade(object):
-                     continue
- 
-             if name == "algorithm":
--                allowed_values = ("2nodelms", "ffsplit", "lms")
-+                allowed_values = ("ffsplit", "lms")
-                 if value not in allowed_values:
-                     report_items.append(reports.invalid_option_value(
-                         name, value, allowed_values, severity, forceable
-@@ -461,19 +499,29 @@ class ConfigFacade(object):
-         else:
-             for quorum in self.config.get_sections("quorum"):
-                 quorum.del_attributes_by_name("two_node")
--        # update qdevice algorithm "lms" vs "2nodelms"
-+
-+    def __update_qdevice_votes(self):
-+        # ffsplit won't start if votes is missing or not set to 1
-+        # for other algorithms it's required not to put votes at all
-+        model = None
-+        algorithm = None
-+        device_sections = []
-         for quorum in self.config.get_sections("quorum"):
-             for device in quorum.get_sections("device"):
--                for net in device.get_sections("net"):
--                    algorithm = None
--                    for dummy_name, value in net.get_attributes("algorithm"):
--                        algorithm = value
--                    if algorithm == "lms" and has_two_nodes:
--                        net.set_attribute("algorithm", "2nodelms")
--                        self._need_stopped_cluster = True
--                    elif algorithm == "2nodelms" and not has_two_nodes:
--                        net.set_attribute("algorithm", "lms")
--                        self._need_stopped_cluster = True
-+                device_sections.append(device)
-+                for dummy_name, value in device.get_attributes("model"):
-+                    model = value
-+        for device in device_sections:
-+            for model_section in device.get_sections(model):
-+                for dummy_name, value in model_section.get_attributes(
-+                    "algorithm"
-+                ):
-+                    algorithm = value
-+        if model == "net":
-+            if algorithm == "ffsplit":
-+                self.__set_section_options(device_sections, {"votes": "1"})
-+            else:
-+                self.__set_section_options(device_sections, {"votes": ""})
- 
-     def __set_section_options(self, section_list, options):
-         for section in section_list[:-1]:
-diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
-index 2446a46..4129aeb 100644
---- a/pcs/lib/corosync/live.py
-+++ b/pcs/lib/corosync/live.py
-@@ -47,3 +47,18 @@ def reload_config(runner):
-             reports.corosync_config_reload_error(output.rstrip())
-         )
- 
-+def get_quorum_status_text(runner):
-+    """
-+    Get runtime quorum status from the local node
-+    """
-+    output, retval = runner.run([
-+        os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
-+        "-p"
-+    ])
-+    # retval is 0 on success if node is not in partition with quorum
-+    # retval is 1 on error OR on success if node has quorum
-+    if retval not in [0, 1]:
-+        raise LibraryError(
-+            reports.corosync_quorum_get_status_error(output)
-+        )
-+    return output
-diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py
-new file mode 100644
-index 0000000..98fbb0e
---- /dev/null
-+++ b/pcs/lib/corosync/qdevice_client.py
-@@ -0,0 +1,93 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os.path
-+
-+from pcs import settings
-+from pcs.lib import reports
-+from pcs.lib.errors import LibraryError
-+
-+
-+def get_status_text(runner, verbose=False):
-+    """
-+    Get quorum device client runtime status in plain text
-+    bool verbose get more detailed output
-+    """
-+    cmd = [
-+        os.path.join(settings.corosync_binaries, "corosync-qdevice-tool"),
-+        "-s"
-+    ]
-+    if verbose:
-+        cmd.append("-v")
-+    output, retval = runner.run(cmd)
-+    if retval != 0:
-+        raise LibraryError(
-+            reports.corosync_quorum_get_status_error(output)
-+        )
-+    return output
-+
-+def remote_client_enable(reporter, node_communicator, node):
-+    """
-+    enable qdevice client service (corosync-qdevice) on a remote node
-+    """
-+    response = node_communicator.call_node(
-+        node,
-+        "remote/qdevice_client_enable",
-+        None
-+    )
-+    if response == "corosync is not enabled, skipping":
-+        reporter.process(
-+            reports.service_enable_skipped(
-+                "corosync-qdevice",
-+                "corosync is not enabled",
-+                node.label
-+            )
-+        )
-+    else:
-+        reporter.process(
-+            reports.service_enable_success("corosync-qdevice", node.label)
-+        )
-+
-+def remote_client_disable(reporter, node_communicator, node):
-+    """
-+    disable qdevice client service (corosync-qdevice) on a remote node
-+    """
-+    node_communicator.call_node(node, "remote/qdevice_client_disable", None)
-+    reporter.process(
-+        reports.service_disable_success("corosync-qdevice", node.label)
-+    )
-+
-+def remote_client_start(reporter, node_communicator, node):
-+    """
-+    start qdevice client service (corosync-qdevice) on a remote node
-+    """
-+    response = node_communicator.call_node(
-+        node,
-+        "remote/qdevice_client_start",
-+        None
-+    )
-+    if response == "corosync is not running, skipping":
-+        reporter.process(
-+            reports.service_start_skipped(
-+                "corosync-qdevice",
-+                "corosync is not running",
-+                node.label
-+            )
-+        )
-+    else:
-+        reporter.process(
-+            reports.service_start_success("corosync-qdevice", node.label)
-+        )
-+
-+def remote_client_stop(reporter, node_communicator, node):
-+    """
-+    stop qdevice client service (corosync-qdevice) on a remote node
-+    """
-+    node_communicator.call_node(node, "remote/qdevice_client_stop", None)
-+    reporter.process(
-+        reports.service_stop_success("corosync-qdevice", node.label)
-+    )
-diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
-index 7479257..4054592 100644
---- a/pcs/lib/corosync/qdevice_net.py
-+++ b/pcs/lib/corosync/qdevice_net.py
-@@ -5,8 +5,14 @@ from __future__ import (
-     unicode_literals,
- )
- 
-+import base64
-+import binascii
-+import functools
-+import os
- import os.path
-+import re
- import shutil
-+import tempfile
- 
- from pcs import settings
- from pcs.lib import external, reports
-@@ -15,6 +21,18 @@ from pcs.lib.errors import LibraryError
- 
- __model = "net"
- __service_name = "corosync-qnetd"
-+__qnetd_certutil = os.path.join(
-+    settings.corosync_qnet_binaries,
-+    "corosync-qnetd-certutil"
-+)
-+__qnetd_tool = os.path.join(
-+    settings.corosync_qnet_binaries,
-+    "corosync-qnetd-tool"
-+)
-+__qdevice_certutil = os.path.join(
-+    settings.corosync_binaries,
-+    "corosync-qdevice-net-certutil"
-+)
- 
- def qdevice_setup(runner):
-     """
-@@ -24,25 +42,63 @@ def qdevice_setup(runner):
-         raise LibraryError(reports.qdevice_already_initialized(__model))
- 
-     output, retval = runner.run([
--        os.path.join(settings.corosync_binaries, "corosync-qnetd-certutil"),
--        "-i"
-+        __qnetd_certutil, "-i"
-     ])
-     if retval != 0:
-         raise LibraryError(
-             reports.qdevice_initialization_error(__model, output.rstrip())
-         )
- 
-+def qdevice_initialized():
-+    """
-+    check if qdevice server certificate database has been initialized
-+    """
-+    return os.path.exists(os.path.join(
-+        settings.corosync_qdevice_net_server_certs_dir,
-+        "cert8.db"
-+    ))
-+
- def qdevice_destroy():
-     """
-     delete qdevice configuration on local host
-     """
-     try:
--        shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir)
-+        if qdevice_initialized():
-+            shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir)
-     except EnvironmentError as e:
-         raise LibraryError(
-             reports.qdevice_destroy_error(__model, e.strerror)
-         )
- 
-+def qdevice_status_generic_text(runner, verbose=False):
-+    """
-+    get qdevice runtime status in plain text
-+    bool verbose get more detailed output
-+    """
-+    cmd = [__qnetd_tool, "-s"]
-+    if verbose:
-+        cmd.append("-v")
-+    output, retval = runner.run(cmd)
-+    if retval != 0:
-+        raise LibraryError(reports.qdevice_get_status_error(__model, output))
-+    return output
-+
-+def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
-+    """
-+    get qdevice runtime status in plain text
-+    bool verbose get more detailed output
-+    string cluster show information only about specified cluster
-+    """
-+    cmd = [__qnetd_tool, "-l"]
-+    if verbose:
-+        cmd.append("-v")
-+    if cluster:
-+        cmd.extend(["-c", cluster])
-+    output, retval = runner.run(cmd)
-+    if retval != 0:
-+        raise LibraryError(reports.qdevice_get_status_error(__model, output))
-+    return output
-+
- def qdevice_enable(runner):
-     """
-     make qdevice start automatically on boot on local host
-@@ -72,3 +128,255 @@ def qdevice_kill(runner):
-     kill qdevice now on local host
-     """
-     external.kill_services(runner, [__service_name])
-+
-+def qdevice_sign_certificate_request(runner, cert_request, cluster_name):
-+    """
-+    sign client certificate request
-+    cert_request certificate request data
-+    string cluster_name name of the cluster to which qdevice is being added
-+    """
-+    if not qdevice_initialized():
-+        raise LibraryError(reports.qdevice_not_initialized(__model))
-+    # save the certificate request, corosync tool only works with files
-+    tmpfile = _store_to_tmpfile(
-+        cert_request,
-+        reports.qdevice_certificate_sign_error
-+    )
-+    # sign the request
-+    output, retval = runner.run([
-+        __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name
-+    ])
-+    tmpfile.close() # temp file is deleted on close
-+    if retval != 0:
-+        raise LibraryError(
-+            reports.qdevice_certificate_sign_error(output.strip())
-+        )
-+    # get signed certificate, corosync tool only works with files
-+    return _get_output_certificate(
-+        output,
-+        reports.qdevice_certificate_sign_error
-+    )
-+
-+def client_setup(runner, ca_certificate):
-+    """
-+    initialize qdevice client on local host
-+    ca_certificate qnetd CA certificate
-+    """
-+    client_destroy()
-+    # save CA certificate, corosync tool only works with files
-+    ca_file_path = os.path.join(
-+        settings.corosync_qdevice_net_client_certs_dir,
-+        settings.corosync_qdevice_net_client_ca_file_name
-+    )
-+    try:
-+        if not os.path.exists(ca_file_path):
-+            os.makedirs(
-+                settings.corosync_qdevice_net_client_certs_dir,
-+                mode=0o700
-+            )
-+        with open(ca_file_path, "wb") as ca_file:
-+            ca_file.write(ca_certificate)
-+    except EnvironmentError as e:
-+        raise LibraryError(
-+            reports.qdevice_initialization_error(__model, e.strerror)
-+        )
-+    # initialize client's certificate storage
-+    output, retval = runner.run([
-+        __qdevice_certutil, "-i", "-c", ca_file_path
-+    ])
-+    if retval != 0:
-+        raise LibraryError(
-+            reports.qdevice_initialization_error(__model, output.rstrip())
-+        )
-+
-+def client_initialized():
-+    """
-+    check if qdevice net client certificate database has been initialized
-+    """
-+    return os.path.exists(os.path.join(
-+        settings.corosync_qdevice_net_client_certs_dir,
-+        "cert8.db"
-+    ))
-+
-+def client_destroy():
-+    """
-+    delete qdevice client config files on local host
-+    """
-+    try:
-+        if client_initialized():
-+            shutil.rmtree(settings.corosync_qdevice_net_client_certs_dir)
-+    except EnvironmentError as e:
-+        raise LibraryError(
-+            reports.qdevice_destroy_error(__model, e.strerror)
-+        )
-+
-+def client_generate_certificate_request(runner, cluster_name):
-+    """
-+    create a certificate request which can be signed by qnetd server
-+    string cluster_name name of the cluster to which qdevice is being added
-+    """
-+    if not client_initialized():
-+        raise LibraryError(reports.qdevice_not_initialized(__model))
-+    output, retval = runner.run([
-+        __qdevice_certutil, "-r", "-n", cluster_name
-+    ])
-+    if retval != 0:
-+        raise LibraryError(
-+            reports.qdevice_initialization_error(__model, output.rstrip())
-+        )
-+    return _get_output_certificate(
-+        output,
-+        functools.partial(reports.qdevice_initialization_error, __model)
-+    )
-+
-+def client_cert_request_to_pk12(runner, cert_request):
-+    """
-+    transform signed certificate request to pk12 certificate which can be
-+    imported to nodes
-+    cert_request signed certificate request
-+    """
-+    if not client_initialized():
-+        raise LibraryError(reports.qdevice_not_initialized(__model))
-+    # save the signed certificate request, corosync tool only works with files
-+    tmpfile = _store_to_tmpfile(
-+        cert_request,
-+        reports.qdevice_certificate_import_error
-+    )
-+    # transform it
-+    output, retval = runner.run([
-+        __qdevice_certutil, "-M", "-c", tmpfile.name
-+    ])
-+    tmpfile.close() # temp file is deleted on close
-+    if retval != 0:
-+        raise LibraryError(
-+            reports.qdevice_certificate_import_error(output)
-+        )
-+    # get resulting pk12, corosync tool only works with files
-+    return _get_output_certificate(
-+        output,
-+        reports.qdevice_certificate_import_error
-+    )
-+
-+def client_import_certificate_and_key(runner, pk12_certificate):
-+    """
-+    import qdevice client certificate to the local node certificate storage
-+    """
-+    if not client_initialized():
-+        raise LibraryError(reports.qdevice_not_initialized(__model))
-+    # save the certificate, corosync tool only works with files
-+    tmpfile = _store_to_tmpfile(
-+        pk12_certificate,
-+        reports.qdevice_certificate_import_error
-+    )
-+    output, retval = runner.run([
-+        __qdevice_certutil, "-m", "-c", tmpfile.name
-+    ])
-+    tmpfile.close() # temp file is deleted on close
-+    if retval != 0:
-+        raise LibraryError(
-+            reports.qdevice_certificate_import_error(output)
-+        )
-+
-+def remote_qdevice_get_ca_certificate(node_communicator, host):
-+    """
-+    connect to a qnetd host and get qnetd CA certificate
-+    string host address of the qnetd host
-+    """
-+    try:
-+        return base64.b64decode(
-+            node_communicator.call_host(
-+                host,
-+                "remote/qdevice_net_get_ca_certificate",
-+                None
-+            )
-+        )
-+    except (TypeError, binascii.Error):
-+        raise LibraryError(reports.invalid_response_format(host))
-+
-+def remote_client_setup(node_communicator, node, qnetd_ca_certificate):
-+    """
-+    connect to a remote node and initialize qdevice there
-+    NodeAddresses node target node
-+    qnetd_ca_certificate qnetd CA certificate
-+    """
-+    return node_communicator.call_node(
-+        node,
-+        "remote/qdevice_net_client_init_certificate_storage",
-+        external.NodeCommunicator.format_data_dict([
-+            ("ca_certificate", base64.b64encode(qnetd_ca_certificate)),
-+        ])
-+    )
-+
-+def remote_sign_certificate_request(
-+    node_communicator, host, cert_request, cluster_name
-+):
-+    """
-+    connect to a qdevice host and sign node certificate there
-+    string host address of the qnetd host
-+    cert_request certificate request to be signed
-+    string cluster_name name of the cluster to which qdevice is being added
-+    """
-+    try:
-+        return base64.b64decode(
-+            node_communicator.call_host(
-+                host,
-+                "remote/qdevice_net_sign_node_certificate",
-+                external.NodeCommunicator.format_data_dict([
-+                    ("certificate_request", base64.b64encode(cert_request)),
-+                    ("cluster_name", cluster_name),
-+                ])
-+            )
-+        )
-+    except (TypeError, binascii.Error):
-+        raise LibraryError(reports.invalid_response_format(host))
-+
-+def remote_client_import_certificate_and_key(node_communicator, node, pk12):
-+    """
-+    import pk12 certificate on a remote node
-+    NodeAddresses node target node
-+    pk12 certificate
-+    """
-+    return node_communicator.call_node(
-+        node,
-+        "remote/qdevice_net_client_import_certificate",
-+        external.NodeCommunicator.format_data_dict([
-+            ("certificate", base64.b64encode(pk12)),
-+        ])
-+    )
-+
-+def remote_client_destroy(node_communicator, node):
-+    """
-+    delete qdevice client config files on a remote node
-+    NodeAddresses node target node
-+    """
-+    return node_communicator.call_node(
-+        node,
-+        "remote/qdevice_net_client_destroy",
-+        None
-+    )
-+
-+def _store_to_tmpfile(data, report_func):
-+    try:
-+        tmpfile = tempfile.NamedTemporaryFile(mode="wb", suffix=".pcs")
-+        tmpfile.write(data)
-+        tmpfile.flush()
-+        return tmpfile
-+    except EnvironmentError as e:
-+        raise LibraryError(report_func(e.strerror))
-+
-+def _get_output_certificate(cert_tool_output, report_func):
-+    regexp = re.compile(r"^Certificate( request)? stored in (?P<path>.+)$")
-+    filename = None
-+    for line in cert_tool_output.splitlines():
-+        match = regexp.search(line)
-+        if match:
-+            filename = match.group("path")
-+    if not filename:
-+        raise LibraryError(report_func(cert_tool_output))
-+    try:
-+        with open(filename, "rb") as cert_file:
-+            return cert_file.read()
-+    except EnvironmentError as e:
-+        raise LibraryError(report_func(
-+            "{path}: {error}".format(path=filename, error=e.strerror)
-+        ))
-diff --git a/pcs/lib/env.py b/pcs/lib/env.py
-index 1151891..24e4252 100644
---- a/pcs/lib/env.py
-+++ b/pcs/lib/env.py
-@@ -10,6 +10,7 @@ from lxml import etree
- from pcs.lib import reports
- from pcs.lib.external import (
-     is_cman_cluster,
-+    is_service_running,
-     CommandRunner,
-     NodeCommunicator,
- )
-@@ -21,6 +22,7 @@ from pcs.lib.corosync.live import (
- from pcs.lib.nodes_task import (
-     distribute_corosync_conf,
-     check_corosync_offline_on_nodes,
-+    qdevice_reload_on_nodes,
- )
- from pcs.lib.pacemaker import (
-     get_cib,
-@@ -152,11 +154,18 @@ class LibraryEnvironment(object):
-                 corosync_conf_data,
-                 skip_offline_nodes
-             )
--            if not corosync_conf_facade.need_stopped_cluster:
-+            if is_service_running(self.cmd_runner(), "corosync"):
-                 reload_corosync_config(self.cmd_runner())
-                 self.report_processor.process(
-                     reports.corosync_config_reloaded()
-                 )
-+            if corosync_conf_facade.need_qdevice_reload:
-+                qdevice_reload_on_nodes(
-+                    self.node_communicator(),
-+                    self.report_processor,
-+                    node_list,
-+                    skip_offline_nodes
-+                )
-         else:
-             self._corosync_conf_data = corosync_conf_data
- 
-diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py
-index c0bd3d1..9cab5e9 100644
---- a/pcs/lib/errors.py
-+++ b/pcs/lib/errors.py
-@@ -42,4 +42,8 @@ class ReportItem(object):
-         self.message = self.message_pattern.format(**self.info)
- 
-     def __repr__(self):
--        return self.code+": "+str(self.info)
-+        return "{severity} {code}: {info}".format(
-+            severity=self.severity,
-+            code=self.code,
-+            info=self.info
-+        )
-diff --git a/pcs/lib/external.py b/pcs/lib/external.py
-index 34426f9..c773e5a 100644
---- a/pcs/lib/external.py
-+++ b/pcs/lib/external.py
-@@ -49,7 +49,11 @@ except ImportError:
- 
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError, ReportItemSeverity
--from pcs.common.tools import simple_cache
-+from pcs.common import report_codes
-+from pcs.common.tools import (
-+    simple_cache,
-+    run_parallel as tools_run_parallel,
-+)
- from pcs import settings
- 
- 
-@@ -521,7 +525,7 @@ class NodeCommunicator(object):
-                 # text in response body with HTTP code 400
-                 # we need to be backward compatible with that
-                 raise NodeCommandUnsuccessfulException(
--                    host, request, response_data
-+                    host, request, response_data.rstrip()
-                 )
-             elif e.code == 401:
-                 raise NodeAuthenticationException(
-@@ -581,3 +585,39 @@ class NodeCommunicator(object):
-                 base64.b64encode(" ".join(self._groups).encode("utf-8"))
-             ))
-         return cookies
-+
-+
-+def parallel_nodes_communication_helper(
-+    func, func_args_kwargs, reporter, skip_offline_nodes=False
-+):
-+    """
-+    Help running node calls in parallel and handle communication exceptions.
-+    Raise LibraryError on any failure.
-+
-+    function func function to be run, should be a function calling a node
-+    iterable func_args_kwargs list of tuples: (*args, **kwargs)
-+    bool skip_offline_nodes do not raise LibraryError if a node is unreachable
-+    """
-+    failure_severity = ReportItemSeverity.ERROR
-+    failure_forceable = report_codes.SKIP_OFFLINE_NODES
-+    if skip_offline_nodes:
-+        failure_severity = ReportItemSeverity.WARNING
-+        failure_forceable = None
-+    report_items = []
-+
-+    def _parallel(*args, **kwargs):
-+        try:
-+            func(*args, **kwargs)
-+        except NodeCommunicationException as e:
-+            report_items.append(
-+                node_communicator_exception_to_report_item(
-+                    e,
-+                    failure_severity,
-+                    failure_forceable
-+                )
-+            )
-+        except LibraryError as e:
-+            report_items.extend(e.args)
-+
-+    tools_run_parallel(_parallel, func_args_kwargs)
-+    reporter.process_list(report_items)
-diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
-index b9a61f6..e94d327 100644
---- a/pcs/lib/nodes_task.py
-+++ b/pcs/lib/nodes_task.py
-@@ -8,14 +8,19 @@ from __future__ import (
- import json
- 
- from pcs.common import report_codes
-+from pcs.common.tools import run_parallel as tools_run_parallel
- from pcs.lib import reports
--from pcs.lib.errors import ReportItemSeverity
-+from pcs.lib.errors import LibraryError, ReportItemSeverity
- from pcs.lib.external import (
-     NodeCommunicator,
-     NodeCommunicationException,
-     node_communicator_exception_to_report_item,
-+    parallel_nodes_communication_helper,
-+)
-+from pcs.lib.corosync import (
-+    live as corosync_live,
-+    qdevice_client,
- )
--from pcs.lib.corosync import live as corosync_live
- 
- 
- def distribute_corosync_conf(
-@@ -33,11 +38,9 @@ def distribute_corosync_conf(
-     if skip_offline_nodes:
-         failure_severity = ReportItemSeverity.WARNING
-         failure_forceable = None
--
--    reporter.process(reports.corosync_config_distribution_started())
-     report_items = []
--    # TODO use parallel communication
--    for node in node_addr_list:
-+
-+    def _parallel(node):
-         try:
-             corosync_live.set_remote_corosync_conf(
-                 node_communicator,
-@@ -62,6 +65,12 @@ def distribute_corosync_conf(
-                     failure_forceable
-                 )
-             )
-+
-+    reporter.process(reports.corosync_config_distribution_started())
-+    tools_run_parallel(
-+        _parallel,
-+        [((node, ), {}) for node in node_addr_list]
-+    )
-     reporter.process_list(report_items)
- 
- def check_corosync_offline_on_nodes(
-@@ -77,13 +86,11 @@ def check_corosync_offline_on_nodes(
-     if skip_offline_nodes:
-         failure_severity = ReportItemSeverity.WARNING
-         failure_forceable = None
--
--    reporter.process(reports.corosync_not_running_check_started())
-     report_items = []
--    # TODO use parallel communication
--    for node in node_addr_list:
-+
-+    def _parallel(node):
-         try:
--            status = node_communicator.call_node(node, "remote/status", "")
-+            status = node_communicator.call_node(node, "remote/status", None)
-             if not json.loads(status)["corosync"]:
-                 reporter.process(
-                     reports.corosync_not_running_on_node_ok(node.label)
-@@ -115,8 +122,48 @@ def check_corosync_offline_on_nodes(
-                     failure_forceable
-                 )
-             )
-+
-+    reporter.process(reports.corosync_not_running_check_started())
-+    tools_run_parallel(
-+        _parallel,
-+        [((node, ), {}) for node in node_addr_list]
-+    )
-     reporter.process_list(report_items)
- 
-+def qdevice_reload_on_nodes(
-+    node_communicator, reporter, node_addr_list, skip_offline_nodes=False
-+):
-+    """
-+    Reload corosync-qdevice configuration on cluster nodes
-+    NodeAddressesList node_addr_list nodes to reload config on
-+    bool skip_offline_nodes don't raise an error on node communication errors
-+    """
-+    reporter.process(reports.qdevice_client_reload_started())
-+    parallel_params = [
-+        [(reporter, node_communicator, node), {}]
-+        for node in node_addr_list
-+    ]
-+    # catch an exception so we try to start qdevice on nodes where we stopped it
-+    report_items = []
-+    try:
-+        parallel_nodes_communication_helper(
-+            qdevice_client.remote_client_stop,
-+            parallel_params,
-+            reporter,
-+            skip_offline_nodes
-+        )
-+    except LibraryError as e:
-+        report_items.extend(e.args)
-+    try:
-+        parallel_nodes_communication_helper(
-+            qdevice_client.remote_client_start,
-+            parallel_params,
-+            reporter,
-+            skip_offline_nodes
-+        )
-+    except LibraryError as e:
-+        report_items.extend(e.args)
-+    reporter.process_list(report_items)
- 
- def node_check_auth(communicator, node):
-     """
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index 490b4ff..d8f88cd 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -552,6 +552,19 @@ def corosync_running_on_node_fail(node):
-         info={"node": node}
-     )
- 
-+def corosync_quorum_get_status_error(reason):
-+    """
-+    unable to get runtime status of quorum on local node
-+    string reason an error message
-+    """
-+    return ReportItem.error(
-+        report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
-+        "Unable to get quorum status: {reason}",
-+        info={
-+            "reason": reason,
-+        }
-+    )
-+
- def corosync_config_reloaded():
-     """
-     corosync configuration has been reloaded
-@@ -614,6 +627,21 @@ def corosync_config_parser_other_error():
-         "Unable to parse corosync config"
-     )
- 
-+def corosync_options_incompatible_with_qdevice(options):
-+    """
-+    cannot set specified corosync options when qdevice is in use
-+    iterable options incompatible options names
-+    """
-+    return ReportItem.error(
-+        report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE,
-+        "These options cannot be set when the cluster uses a quorum device: "
-+        + "{options_names_str}",
-+        info={
-+            "options_names": options,
-+            "options_names_str": ", ".join(sorted(options)),
-+        }
-+    )
-+
- def qdevice_already_defined():
-     """
-     qdevice is already set up in a cluster, when it was expected not to be
-@@ -641,6 +669,15 @@ def qdevice_remove_or_cluster_stop_needed():
-         "You need to stop the cluster or remove qdevice from cluster to continue"
-     )
- 
-+def qdevice_client_reload_started():
-+    """
-+    qdevice client configuration is about to be reloaded on nodes
-+    """
-+    return ReportItem.info(
-+        report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
-+        "Reloading qdevice configuration on nodes..."
-+    )
-+
- def qdevice_already_initialized(model):
-     """
-     cannot create qdevice on local host, it has been already created
-@@ -654,6 +691,19 @@ def qdevice_already_initialized(model):
-         }
-     )
- 
-+def qdevice_not_initialized(model):
-+    """
-+    cannot work with qdevice on local host, it has not been created yet
-+    string model qdevice model
-+    """
-+    return ReportItem.error(
-+        report_codes.QDEVICE_NOT_INITIALIZED,
-+        "Quorum device '{model}' has not been initialized yet",
-+        info={
-+            "model": model,
-+        }
-+    )
-+
- def qdevice_initialization_success(model):
-     """
-     qdevice was successfully initialized on local host
-@@ -682,6 +732,72 @@ def qdevice_initialization_error(model, reason):
-         }
-     )
- 
-+def qdevice_certificate_distribution_started():
-+    """
-+    Qdevice certificates are about to be set up on nodes
-+    """
-+    return ReportItem.info(
-+        report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+        "Setting up qdevice certificates on nodes..."
-+    )
-+
-+def qdevice_certificate_accepted_by_node(node):
-+    """
-+    Qdevice certificates have been saved to a node
-+    string node node on which certificates have been saved
-+    """
-+    return ReportItem.info(
-+        report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-+        "{node}: Succeeded",
-+        info={"node": node}
-+    )
-+
-+def qdevice_certificate_removal_started():
-+    """
-+    Qdevice certificates are about to be removed from nodes
-+    """
-+    return ReportItem.info(
-+        report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
-+        "Removing qdevice certificates from nodes..."
-+    )
-+
-+def qdevice_certificate_removed_from_node(node):
-+    """
-+    Qdevice certificates have been removed from a node
-+    string node node on which certificates have been deleted
-+    """
-+    return ReportItem.info(
-+        report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
-+        "{node}: Succeeded",
-+        info={"node": node}
-+    )
-+
-+def qdevice_certificate_import_error(reason):
-+    """
-+    an error occured when importing qdevice certificate to a node
-+    string reason an error message
-+    """
-+    return ReportItem.error(
-+        report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-+        "Unable to import quorum device certificate: {reason}",
-+        info={
-+            "reason": reason,
-+        }
-+    )
-+
-+def qdevice_certificate_sign_error(reason):
-+    """
-+    an error occured when signing qdevice certificate
-+    string reason an error message
-+    """
-+    return ReportItem.error(
-+        report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR,
-+        "Unable to sign quorum device certificate: {reason}",
-+        info={
-+            "reason": reason,
-+        }
-+    )
-+
- def qdevice_destroy_success(model):
-     """
-     qdevice configuration successfully removed from local host
-@@ -710,6 +826,21 @@ def qdevice_destroy_error(model, reason):
-         }
-     )
- 
-+def qdevice_get_status_error(model, reason):
-+    """
-+    unable to get runtime status of qdevice
-+    string model qdevice model
-+    string reason an error message
-+    """
-+    return ReportItem.error(
-+        report_codes.QDEVICE_GET_STATUS_ERROR,
-+        "Unable to get status of quorum device '{model}': {reason}",
-+        info={
-+            "model": model,
-+            "reason": reason,
-+        }
-+    )
-+
- def cman_unsupported_command():
-     """
-     requested library command is not available as local cluster is CMAN based
-@@ -1022,31 +1153,55 @@ def service_start_started(service):
-         }
-     )
- 
--def service_start_error(service, reason):
-+def service_start_error(service, reason, node=None):
-     """
-     system service start failed
-     string service service name or description
-     string reason error message
-+    string node node on which service has been requested to start
-     """
-+    msg = "Unable to start {service}: {reason}"
-     return ReportItem.error(
-         report_codes.SERVICE_START_ERROR,
--        "Unable to start {service}: {reason}",
-+        msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-             "reason": reason,
-+            "node": node,
-         }
-     )
- 
--def service_start_success(service):
-+def service_start_success(service, node=None):
-     """
-     system service was started successfully
-     string service service name or description
-+    string node node on which service has been requested to start
-     """
-+    msg = "{service} started"
-     return ReportItem.info(
-         report_codes.SERVICE_START_SUCCESS,
--        "{service} started",
-+        msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-+            "node": node,
-+        }
-+    )
-+
-+def service_start_skipped(service, reason, node=None):
-+    """
-+    starting system service was skipped, no error occured
-+    string service service name or description
-+    string reason why the start has been skipped
-+    string node node on which service has been requested to start
-+    """
-+    msg = "not starting {service} - {reason}"
-+    return ReportItem.info(
-+        report_codes.SERVICE_START_SKIPPED,
-+        msg if node is None else "{node}: " + msg,
-+        info={
-+            "service": service,
-+            "reason": reason,
-+            "node": node,
-         }
-     )
- 
-@@ -1063,31 +1218,37 @@ def service_stop_started(service):
-         }
-     )
- 
--def service_stop_error(service, reason):
-+def service_stop_error(service, reason, node=None):
-     """
-     system service stop failed
-     string service service name or description
-     string reason error message
-+    string node node on which service has been requested to stop
-     """
-+    msg = "Unable to stop {service}: {reason}"
-     return ReportItem.error(
-         report_codes.SERVICE_STOP_ERROR,
--        "Unable to stop {service}: {reason}",
-+        msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-             "reason": reason,
-+            "node": node,
-         }
-     )
- 
--def service_stop_success(service):
-+def service_stop_success(service, node=None):
-     """
-     system service was stopped successfully
-     string service service name or description
-+    string node node on which service has been requested to stop
-     """
-+    msg = "{service} stopped"
-     return ReportItem.info(
-         report_codes.SERVICE_STOP_SUCCESS,
--        "{service} stopped",
-+        msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-+            "node": node,
-         }
-     )
- 
-@@ -1121,6 +1282,19 @@ def service_kill_success(services):
-         }
-     )
- 
-+def service_enable_started(service):
-+    """
-+    system service is being enabled
-+    string service service name or description
-+    """
-+    return ReportItem.info(
-+        report_codes.SERVICE_ENABLE_STARTED,
-+        "Enabling {service}...",
-+        info={
-+            "service": service,
-+        }
-+    )
-+
- def service_enable_error(service, reason, node=None):
-     """
-     system service enable failed
-@@ -1143,7 +1317,7 @@ def service_enable_success(service, node=None):
-     """
-     system service was enabled successfully
-     string service service name or description
--    string node node on which service was enabled
-+    string node node on which service has been enabled
-     """
-     msg = "{service} enabled"
-     return ReportItem.info(
-@@ -1155,6 +1329,37 @@ def service_enable_success(service, node=None):
-         }
-     )
- 
-+def service_enable_skipped(service, reason, node=None):
-+    """
-+    enabling system service was skipped, no error occured
-+    string service service name or description
-+    string reason why the enabling has been skipped
-+    string node node on which service has been requested to enable
-+    """
-+    msg = "not enabling {service} - {reason}"
-+    return ReportItem.info(
-+        report_codes.SERVICE_ENABLE_SKIPPED,
-+        msg if node is None else "{node}: " + msg,
-+        info={
-+            "service": service,
-+            "reason": reason,
-+            "node": node,
-+        }
-+    )
-+
-+def service_disable_started(service):
-+    """
-+    system service is being disabled
-+    string service service name or description
-+    """
-+    return ReportItem.info(
-+        report_codes.SERVICE_DISABLE_STARTED,
-+        "Disabling {service}...",
-+        info={
-+            "service": service,
-+        }
-+    )
-+
- def service_disable_error(service, reason, node=None):
-     """
-     system service disable failed
-@@ -1189,7 +1394,6 @@ def service_disable_success(service, node=None):
-         }
-     )
- 
--
- def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None):
-     """
-     Invalid format of metadata
-@@ -1201,7 +1405,6 @@ def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None):
-         forceable=forceable
-     )
- 
--
- def unable_to_get_agent_metadata(
-     agent, reason, severity=ReportItemSeverity.ERROR, forceable=None
- ):
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 425b613..a72a9bd 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -518,8 +518,11 @@ rule remove <rule id>
- Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed.
- .SS "qdevice"
- .TP
-+status <device model> [\fB\-\-full\fR] [<cluster name>]
-+Show runtime status of specified model of quorum device provider.  Using \fB\-\-full\fR will give more detailed output.  If <cluster name> is specified, only information about the specified cluster will be displayed.
-+.TP
- setup model <device model> [\fB\-\-enable\fR] [\fB\-\-start\fR]
--Configure specified model of quorum device provider.  Quorum device then may be added to clusters by "pcs quorum device add" command.  \fB\-\-start\fR will also start the provider.  \fB\-\-enable\fR will configure the provider to start on boot.
-+Configure specified model of quorum device provider.  Quorum device then can be added to clusters by running "pcs quorum device add" command in a cluster.  \fB\-\-start\fR will also start the provider.  \fB\-\-enable\fR will configure the provider to start on boot.
- .TP
- destroy <device model>
- Disable and stop specified model of quorum device provider and delete its configuration files.
-@@ -531,7 +534,7 @@ stop <device model>
- Stop specified model of quorum device provider.
- .TP
- kill <device model>
--Force specified model of quorum device provider to stop (performs kill -9).
-+Force specified model of quorum device provider to stop (performs kill \-9).  Note that init system (e.g. systemd) can detect that the qdevice is not running and start it again.  If you want to stop the qdevice, run "pcs qdevice stop" command.
- .TP
- enable <device model>
- Configure specified model of quorum device provider to start on boot.
-@@ -543,14 +546,22 @@ Configure specified model of quorum device provider to not start on boot.
- config
- Show quorum configuration.
- .TP
--device add [generic options] model <device model> [model options]
--Add quorum device to cluster.  Quorum device needs to be created first by "pcs qdevice setup" command.
-+status
-+Show quorum runtime status.
-+.TP
-+device add [<generic options>] model <device model> [<model options>]
-+Add a quorum device to the cluster.  Quorum device needs to be created first by "pcs qdevice setup" command.  It is not possible to use more than one quorum device in a cluster simultaneously.  Generic options, model and model options are all documented in corosync's corosync\-qdevice(8) man page.
- .TP
- device remove
--Remove quorum device from cluster.
-+Remove a quorum device from the cluster.
- .TP
--device update [generic options] [model <model options>]
--Add/Change quorum device options.  Requires cluster to be stopped.
-+device status [\fB\-\-full\fR]
-+Show quorum device runtime status.  Using \fB\-\-full\fR will give more detailed output.
-+.TP
-+device update [<generic options>] [model <model options>]
-+Add/Change quorum device options.  Generic options and model options are all documented in corosync's corosync\-qdevice(8) man page.  Requires the cluster to be stopped.
-+
-+WARNING: If you want to change "host" option of qdevice model net, use "pcs quorum device remove" and "pcs quorum device add" commands to set up configuration properly unless old and new host is the same machine.
- .TP
- unblock [\fB\-\-force\fR]
- Cancel waiting for all nodes when establishing quorum.  Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless.  This command should ONLY be used when nodes which the cluster is waiting for have been confirmed to be powered off and to have no access to shared resources.
-@@ -558,7 +569,7 @@ Cancel waiting for all nodes when establishing quorum.  Useful in situations whe
- .B WARNING: If the nodes are not actually powered off or they do have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed.
- .TP
- update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
--Add/Change quorum options.  At least one option must be specified.  Options are documented in corosync's votequorum(5) man page.  Requires cluster to be stopped.
-+Add/Change quorum options.  At least one option must be specified.  Options are documented in corosync's votequorum(5) man page.  Requires the cluster to be stopped.
- .SS "status"
- .TP
- [status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
-diff --git a/pcs/qdevice.py b/pcs/qdevice.py
-index 1f06709..0037704 100644
---- a/pcs/qdevice.py
-+++ b/pcs/qdevice.py
-@@ -23,6 +23,8 @@ def qdevice_cmd(lib, argv, modifiers):
-     try:
-         if sub_cmd == "help":
-             usage.qdevice(argv)
-+        elif sub_cmd == "status":
-+            qdevice_status_cmd(lib, argv_next, modifiers)
-         elif sub_cmd == "setup":
-             qdevice_setup_cmd(lib, argv_next, modifiers)
-         elif sub_cmd == "destroy":
-@@ -37,6 +39,11 @@ def qdevice_cmd(lib, argv, modifiers):
-             qdevice_enable_cmd(lib, argv_next, modifiers)
-         elif sub_cmd == "disable":
-             qdevice_disable_cmd(lib, argv_next, modifiers)
-+        # following commands are internal use only, called from pcsd
-+        elif sub_cmd == "sign-net-cert-request":
-+            qdevice_sign_net_cert_request_cmd(lib, argv_next, modifiers)
-+        elif sub_cmd == "net-client":
-+            qdevice_net_client_cmd(lib, argv_next, modifiers)
-         else:
-             raise CmdLineInputError()
-     except LibraryError as e:
-@@ -44,6 +51,35 @@ def qdevice_cmd(lib, argv, modifiers):
-     except CmdLineInputError as e:
-         utils.exit_on_cmdline_input_errror(e, "qdevice", sub_cmd)
- 
-+# this is internal use only, called from pcsd
-+def qdevice_net_client_cmd(lib, argv, modifiers):
-+    if len(argv) < 1:
-+        utils.err("invalid command")
-+
-+    sub_cmd, argv_next = argv[0], argv[1:]
-+    try:
-+        if sub_cmd == "setup":
-+            qdevice_net_client_setup_cmd(lib, argv_next, modifiers)
-+        elif sub_cmd == "import-certificate":
-+            qdevice_net_client_import_certificate_cmd(lib, argv_next, modifiers)
-+        elif sub_cmd == "destroy":
-+            qdevice_net_client_destroy(lib, argv_next, modifiers)
-+        else:
-+            raise CmdLineInputError("invalid command")
-+    except LibraryError as e:
-+        utils.process_library_reports(e.args)
-+    except CmdLineInputError as e:
-+        utils.err(e.message)
-+
-+def qdevice_status_cmd(lib, argv, modifiers):
-+    if len(argv) < 1 or len(argv) > 2:
-+        raise CmdLineInputError()
-+    model = argv[0]
-+    cluster = None if len(argv) < 2 else argv[1]
-+    print(
-+        lib.qdevice.status(model, modifiers["full"], cluster)
-+    )
-+
- def qdevice_setup_cmd(lib, argv, modifiers):
-     if len(argv) != 2:
-         raise CmdLineInputError()
-@@ -87,3 +123,38 @@ def qdevice_disable_cmd(lib, argv, modifiers):
-         raise CmdLineInputError()
-     model = argv[0]
-     lib.qdevice.disable(model)
-+
-+# following commands are internal use only, called from pcsd
-+
-+def qdevice_net_client_setup_cmd(lib, argv, modifiers):
-+    ca_certificate = _read_stdin()
-+    lib.qdevice.client_net_setup(ca_certificate)
-+
-+def qdevice_net_client_import_certificate_cmd(lib, argv, modifiers):
-+    certificate = _read_stdin()
-+    lib.qdevice.client_net_import_certificate(certificate)
-+
-+def qdevice_net_client_destroy(lib, argv, modifiers):
-+    lib.qdevice.client_net_destroy()
-+
-+def qdevice_sign_net_cert_request_cmd(lib, argv, modifiers):
-+    certificate_request = _read_stdin()
-+    signed = lib.qdevice.sign_net_cert_request(
-+        certificate_request,
-+        modifiers["name"]
-+    )
-+    if sys.version_info.major > 2:
-+        # In python3 base64.b64encode returns bytes.
-+        # In python2 base64.b64encode returns string.
-+        # Bytes is printed like this: b'bytes content'
-+        # and we need to get rid of that b'', so we change bytes to string.
-+        # Since it's base64encoded, it's safe to use ascii.
-+        signed = signed.decode("ascii")
-+    print(signed)
-+
-+def _read_stdin():
-+    # in python3 stdin returns str so we need to use buffer
-+    if hasattr(sys.stdin, "buffer"):
-+        return sys.stdin.buffer.read()
-+    else:
-+        return sys.stdin.read()
-diff --git a/pcs/quorum.py b/pcs/quorum.py
-index f793a21..27085ac 100644
---- a/pcs/quorum.py
-+++ b/pcs/quorum.py
-@@ -28,6 +28,8 @@ def quorum_cmd(lib, argv, modificators):
-             usage.quorum(argv)
-         elif sub_cmd == "config":
-             quorum_config_cmd(lib, argv_next, modificators)
-+        elif sub_cmd == "status":
-+            quorum_status_cmd(lib, argv_next, modificators)
-         elif sub_cmd == "device":
-             quorum_device_cmd(lib, argv_next, modificators)
-         elif sub_cmd == "unblock":
-@@ -51,6 +53,8 @@ def quorum_device_cmd(lib, argv, modificators):
-             quorum_device_add_cmd(lib, argv_next, modificators)
-         elif sub_cmd == "remove":
-             quorum_device_remove_cmd(lib, argv_next, modificators)
-+        elif sub_cmd == "status":
-+            quorum_device_status_cmd(lib, argv_next, modificators)
-         elif sub_cmd == "update":
-             quorum_device_update_cmd(lib, argv_next, modificators)
-         else:
-@@ -97,6 +101,21 @@ def quorum_config_to_str(config):
- 
-     return lines
- 
-+def quorum_status_cmd(lib, argv, modificators):
-+    if argv:
-+        raise CmdLineInputError()
-+    print(lib.quorum.status())
-+
-+def quorum_update_cmd(lib, argv, modificators):
-+    options = parse_args.prepare_options(argv)
-+    if not options:
-+        raise CmdLineInputError()
-+
-+    lib.quorum.set_options(
-+        options,
-+        skip_offline_nodes=modificators["skip_offline_nodes"]
-+    )
-+
- def quorum_device_add_cmd(lib, argv, modificators):
-     # we expect "model" keyword once, followed by the actual model value
-     options_lists = parse_args.split_list(argv, "model")
-@@ -131,6 +150,11 @@ def quorum_device_remove_cmd(lib, argv, modificators):
-         skip_offline_nodes=modificators["skip_offline_nodes"]
-     )
- 
-+def quorum_device_status_cmd(lib, argv, modificators):
-+    if argv:
-+        raise CmdLineInputError()
-+    print(lib.quorum.status_device(modificators["full"]))
-+
- def quorum_device_update_cmd(lib, argv, modificators):
-     # we expect "model" keyword once
-     options_lists = parse_args.split_list(argv, "model")
-@@ -154,13 +178,3 @@ def quorum_device_update_cmd(lib, argv, modificators):
-         force_options=modificators["force"],
-         skip_offline_nodes=modificators["skip_offline_nodes"]
-     )
--
--def quorum_update_cmd(lib, argv, modificators):
--    options = parse_args.prepare_options(argv)
--    if not options:
--        raise CmdLineInputError()
--
--    lib.quorum.set_options(
--        options,
--        skip_offline_nodes=modificators["skip_offline_nodes"]
--    )
-diff --git a/pcs/settings_default.py b/pcs/settings_default.py
-index 3acd8e0..9d44918 100644
---- a/pcs/settings_default.py
-+++ b/pcs/settings_default.py
-@@ -2,18 +2,20 @@ import os.path
- 
- pacemaker_binaries = "/usr/sbin/"
- corosync_binaries = "/usr/sbin/"
-+corosync_qnet_binaries = "/usr/bin/"
- ccs_binaries = "/usr/sbin/"
- corosync_conf_dir = "/etc/corosync/"
- corosync_conf_file = os.path.join(corosync_conf_dir, "corosync.conf")
- corosync_uidgid_dir = os.path.join(corosync_conf_dir, "uidgid.d/")
- corosync_qdevice_net_server_certs_dir = os.path.join(
-     corosync_conf_dir,
--    "qdevice/net/qnetd/nssdb"
-+    "qnetd/nssdb"
- )
- corosync_qdevice_net_client_certs_dir = os.path.join(
-     corosync_conf_dir,
--    "qdevice/net/node/nssdb"
-+    "qdevice/net/nssdb"
- )
-+corosync_qdevice_net_client_ca_file_name = "qnetd-cacert.crt"
- cluster_conf_file = "/etc/cluster/cluster.conf"
- fence_agent_binaries = "/usr/sbin/"
- pengine_binary = "/usr/libexec/pacemaker/pengine"
-diff --git a/pcs/test/resources/qdevice-certs/qnetd-cacert.crt b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt
-new file mode 100644
-index 0000000..34dcab0
---- /dev/null
-+++ b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt
-@@ -0,0 +1 @@
-+certificate data
-\ No newline at end of file
-diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py
-index 3900c1d..ff588d5 100644
---- a/pcs/test/test_lib_commands_qdevice.py
-+++ b/pcs/test/test_lib_commands_qdevice.py
-@@ -6,6 +6,7 @@ from __future__ import (
- )
- 
- from unittest import TestCase
-+import base64
- import logging
- 
- from pcs.test.tools.pcs_mock import mock
-@@ -58,6 +59,11 @@ class QdeviceDisabledOnCmanTest(QdeviceTestCase):
-             lambda: lib.qdevice_destroy(self.lib_env, "bad model")
-         )
- 
-+    def test_status_text(self):
-+        self.base_test(
-+            lambda: lib.qdevice_status_text(self.lib_env, "bad model")
-+        )
-+
-     def test_enable(self):
-         self.base_test(
-             lambda: lib.qdevice_enable(self.lib_env, "bad model")
-@@ -83,6 +89,30 @@ class QdeviceDisabledOnCmanTest(QdeviceTestCase):
-             lambda: lib.qdevice_kill(self.lib_env, "bad model")
-         )
- 
-+    def test_qdevice_net_sign_certificate_request(self):
-+        self.base_test(
-+            lambda: lib.qdevice_net_sign_certificate_request(
-+                self.lib_env,
-+                "certificate request",
-+                "cluster name"
-+            )
-+        )
-+
-+    def test_client_net_setup(self):
-+        self.base_test(
-+            lambda: lib.client_net_setup(self.lib_env, "ca certificate")
-+        )
-+
-+    def test_client_net_import_certificate(self):
-+        self.base_test(
-+            lambda: lib.client_net_import_certificate(self.lib_env, "cert")
-+        )
-+
-+    def test_client_net_destroy(self):
-+        self.base_test(
-+            lambda: lib.client_net_destroy(self.lib_env)
-+        )
-+
- 
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- class QdeviceBadModelTest(QdeviceTestCase):
-@@ -110,6 +140,11 @@ class QdeviceBadModelTest(QdeviceTestCase):
-             lambda: lib.qdevice_destroy(self.lib_env, "bad model")
-         )
- 
-+    def test_status_text(self):
-+        self.base_test(
-+            lambda: lib.qdevice_status_text(self.lib_env, "bad model")
-+        )
-+
-     def test_enable(self):
-         self.base_test(
-             lambda: lib.qdevice_enable(self.lib_env, "bad model")
-@@ -489,6 +524,80 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
-         )
- 
- 
-+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_status_cluster_text")
-+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_status_generic_text")
-+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+class TestQdeviceNetStatusTextTest(QdeviceTestCase):
-+    def test_success(self, mock_status_generic, mock_status_cluster):
-+        mock_status_generic.return_value = "generic status info\n"
-+        mock_status_cluster.return_value = "cluster status info\n"
-+
-+        self.assertEquals(
-+            lib.qdevice_status_text(self.lib_env, "net"),
-+             "generic status info\ncluster status info\n"
-+        )
-+
-+        mock_status_generic.assert_called_once_with("mock_runner", False)
-+        mock_status_cluster.assert_called_once_with("mock_runner", None, False)
-+
-+    def test_success_verbose(self, mock_status_generic, mock_status_cluster):
-+        mock_status_generic.return_value = "generic status info\n"
-+        mock_status_cluster.return_value = "cluster status info\n"
-+
-+        self.assertEquals(
-+            lib.qdevice_status_text(self.lib_env, "net", verbose=True),
-+             "generic status info\ncluster status info\n"
-+        )
-+
-+        mock_status_generic.assert_called_once_with("mock_runner", True)
-+        mock_status_cluster.assert_called_once_with("mock_runner", None, True)
-+
-+    def test_success_cluster(self, mock_status_generic, mock_status_cluster):
-+        mock_status_generic.return_value = "generic status info\n"
-+        mock_status_cluster.return_value = "cluster status info\n"
-+
-+        self.assertEquals(
-+            lib.qdevice_status_text(self.lib_env, "net", cluster="name"),
-+             "generic status info\ncluster status info\n"
-+        )
-+
-+        mock_status_generic.assert_called_once_with("mock_runner", False)
-+        mock_status_cluster.assert_called_once_with("mock_runner", "name", False)
-+
-+    def test_error_generic_status(
-+        self, mock_status_generic, mock_status_cluster
-+    ):
-+        mock_status_generic.side_effect = LibraryError("mock_report_item")
-+        mock_status_cluster.return_value = "cluster status info\n"
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib.qdevice_status_text(self.lib_env, "net")
-+        )
-+
-+        mock_status_generic.assert_called_once_with("mock_runner", False)
-+        mock_status_cluster.assert_not_called()
-+
-+    def test_error_cluster_status(
-+        self, mock_status_generic, mock_status_cluster
-+    ):
-+        mock_status_generic.return_value = "generic status info\n"
-+        mock_status_cluster.side_effect = LibraryError("mock_report_item")
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib.qdevice_status_text(self.lib_env, "net")
-+        )
-+
-+        mock_status_generic.assert_called_once_with("mock_runner", False)
-+        mock_status_cluster.assert_called_once_with("mock_runner", None, False)
-+
-+
- @mock.patch("pcs.lib.external.enable_service")
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- @mock.patch.object(
-@@ -757,3 +866,149 @@ class QdeviceNetKillTest(QdeviceTestCase):
-             "mock_runner",
-             ["corosync-qnetd"]
-         )
-+
-+
-+@mock.patch(
-+    "pcs.lib.commands.qdevice.qdevice_net.qdevice_sign_certificate_request"
-+)
-+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+class QdeviceNetSignCertificateRequestTest(QdeviceTestCase):
-+    def test_success(self, mock_qdevice_func):
-+        qdevice_func_input = "certificate request".encode("utf-8")
-+        qdevice_func_output = "signed certificate".encode("utf-8")
-+        mock_qdevice_func.return_value = qdevice_func_output
-+        cluster_name = "clusterName"
-+
-+        self.assertEqual(
-+            base64.b64encode(qdevice_func_output),
-+            lib.qdevice_net_sign_certificate_request(
-+                self.lib_env,
-+                base64.b64encode(qdevice_func_input),
-+                cluster_name
-+            )
-+        )
-+
-+        mock_qdevice_func.assert_called_once_with(
-+            "mock_runner",
-+            qdevice_func_input,
-+            cluster_name
-+        )
-+
-+    def test_bad_input(self, mock_qdevice_func):
-+        qdevice_func_input = "certificate request".encode("utf-8")
-+        cluster_name = "clusterName"
-+
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_net_sign_certificate_request(
-+                self.lib_env,
-+                qdevice_func_input,
-+                cluster_name
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_OPTION_VALUE,
-+                {
-+                    "option_name": "qnetd certificate request",
-+                    "option_value": qdevice_func_input,
-+                    "allowed_values": ["base64 encoded certificate"],
-+                }
-+            )
-+        )
-+
-+        mock_qdevice_func.assert_not_called()
-+
-+
-+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.client_setup")
-+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+class ClientNetSetupTest(QdeviceTestCase):
-+    def test_success(self, mock_qdevice_func):
-+        qdevice_func_input = "CA certificate".encode("utf-8")
-+
-+        lib.client_net_setup(self.lib_env, base64.b64encode(qdevice_func_input))
-+
-+        mock_qdevice_func.assert_called_once_with(
-+            "mock_runner",
-+            qdevice_func_input
-+        )
-+
-+    def test_bad_input(self, mock_qdevice_func):
-+        qdevice_func_input = "CA certificate".encode("utf-8")
-+
-+        assert_raise_library_error(
-+            lambda: lib.client_net_setup(self.lib_env, qdevice_func_input),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_OPTION_VALUE,
-+                {
-+                    "option_name": "qnetd CA certificate",
-+                    "option_value": qdevice_func_input,
-+                    "allowed_values": ["base64 encoded certificate"],
-+                }
-+            )
-+        )
-+
-+        mock_qdevice_func.assert_not_called()
-+
-+
-+@mock.patch(
-+    "pcs.lib.commands.qdevice.qdevice_net.client_import_certificate_and_key"
-+)
-+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+class ClientNetImportCertificateTest(QdeviceTestCase):
-+    def test_success(self, mock_qdevice_func):
-+        qdevice_func_input = "client certificate".encode("utf-8")
-+
-+        lib.client_net_import_certificate(
-+            self.lib_env,
-+            base64.b64encode(qdevice_func_input)
-+        )
-+
-+        mock_qdevice_func.assert_called_once_with(
-+            "mock_runner",
-+            qdevice_func_input
-+        )
-+
-+    def test_bad_input(self, mock_qdevice_func):
-+        qdevice_func_input = "client certificate".encode("utf-8")
-+
-+        assert_raise_library_error(
-+            lambda: lib.client_net_import_certificate(
-+                self.lib_env,
-+                qdevice_func_input
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_OPTION_VALUE,
-+                {
-+                    "option_name": "qnetd client certificate",
-+                    "option_value": qdevice_func_input,
-+                    "allowed_values": ["base64 encoded certificate"],
-+                }
-+            )
-+        )
-+
-+        mock_qdevice_func.assert_not_called()
-+
-+
-+@mock.patch("pcs.lib.commands.qdevice.qdevice_net.client_destroy")
-+@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+class ClientNetDestroyTest(QdeviceTestCase):
-+    def test_success(self, mock_qdevice_func):
-+        lib.client_net_destroy(self.lib_env)
-+        mock_qdevice_func.assert_called_once_with()
-+
-diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
-index 5725381..e824f37 100644
---- a/pcs/test/test_lib_commands_quorum.py
-+++ b/pcs/test/test_lib_commands_quorum.py
-@@ -21,7 +21,12 @@ from pcs.test.tools.pcs_mock import mock
- 
- from pcs.common import report_codes
- from pcs.lib.env import LibraryEnvironment
--from pcs.lib.errors import ReportItemSeverity as severity
-+from pcs.lib.errors import (
-+    LibraryError,
-+    ReportItemSeverity as severity,
-+)
-+from pcs.lib.external import NodeCommunicationException
-+from pcs.lib.node import NodeAddresses, NodeAddressesList
- 
- from pcs.lib.commands import quorum as lib
- 
-@@ -243,25 +248,102 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
-         mock_push_corosync.assert_not_called()
- 
- 
-+@mock.patch("pcs.lib.commands.quorum.corosync_live.get_quorum_status_text")
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+class StatusTextTest(TestCase, CmanMixin):
-+    def setUp(self):
-+        self.mock_logger = mock.MagicMock(logging.Logger)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-+    def test_disabled_on_cman(self, mock_status):
-+        self.assert_disabled_on_cman(
-+            lambda: lib.status_text(self.lib_env)
-+        )
-+        mock_status.assert_not_called()
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_success(self, mock_status):
-+        mock_status.return_value = "status text"
-+        self.assertEqual(
-+            lib.status_text(self.lib_env),
-+            "status text"
-+        )
-+        mock_status.assert_called_once_with("mock_runner")
-+
-+
-+@mock.patch("pcs.lib.commands.quorum.qdevice_client.get_status_text")
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+class StatusDeviceTextTest(TestCase, CmanMixin):
-+    def setUp(self):
-+        self.mock_logger = mock.MagicMock(logging.Logger)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-+    def test_disabled_on_cman(self, mock_status):
-+        self.assert_disabled_on_cman(
-+            lambda: lib.status_device_text(self.lib_env)
-+        )
-+        mock_status.assert_not_called()
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_success(self, mock_status):
-+        mock_status.return_value = "status text"
-+        self.assertEqual(
-+            lib.status_device_text(self.lib_env),
-+            "status text"
-+        )
-+        mock_status.assert_called_once_with("mock_runner", False)
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_success_verbose(self, mock_status):
-+        mock_status.return_value = "status text"
-+        self.assertEqual(
-+            lib.status_device_text(self.lib_env, True),
-+            "status text"
-+        )
-+        mock_status.assert_called_once_with("mock_runner", True)
-+
-+
- @mock.patch.object(LibraryEnvironment, "push_corosync_conf")
- @mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
-+@mock.patch("pcs.lib.commands.quorum._add_device_model_net")
-+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_enable")
-+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_start")
- class AddDeviceTest(TestCase, CmanMixin):
-     def setUp(self):
-         self.mock_logger = mock.MagicMock(logging.Logger)
-         self.mock_reporter = MockLibraryReportProcessor()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
--    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
-+    def test_disabled_on_cman(
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-         self.assert_disabled_on_cman(
-             lambda: lib.add_device(lib_env, "net", {"host": "127.0.0.1"}, {})
-         )
-         mock_get_corosync.assert_not_called()
-         mock_push_corosync.assert_not_called()
-+        mock_add_net.assert_not_called()
-+        mock_client_enable.assert_not_called()
-+        mock_client_start.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-     def test_enabled_on_cman_if_not_live(
--        self, mock_get_corosync, mock_push_corosync
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-     ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-@@ -287,9 +369,15 @@ class AddDeviceTest(TestCase, CmanMixin):
- 
-         self.assertEqual(1, mock_get_corosync.call_count)
-         self.assertEqual(0, mock_push_corosync.call_count)
-+        mock_add_net.assert_not_called()
-+        mock_client_enable.assert_not_called()
-+        mock_client_start.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_success(self, mock_get_corosync, mock_push_corosync):
-+    def test_success(
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -311,6 +399,70 @@ class AddDeviceTest(TestCase, CmanMixin):
-     device {
-         timeout: 12345
-         model: net
-+        votes: 1
-+
-+        net {
-+            algorithm: ffsplit
-+            host: 127.0.0.1
-+        }
-+    }
-+"""
-+            )
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_ENABLE_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_START_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+            ]
-+        )
-+        self.assertEqual(1, len(mock_add_net.mock_calls))
-+        self.assertEqual(3, len(mock_client_enable.mock_calls))
-+        self.assertEqual(3, len(mock_client_start.mock_calls))
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_success_file(
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-+        original_conf = open(rc("corosync-3nodes.conf")).read()
-+        mock_get_corosync.return_value = original_conf
-+        lib_env = LibraryEnvironment(
-+            self.mock_logger,
-+            self.mock_reporter,
-+            corosync_conf_data=original_conf
-+        )
-+
-+        lib.add_device(
-+            lib_env,
-+            "net",
-+            {"host": "127.0.0.1", "algorithm": "ffsplit"},
-+            {"timeout": "12345"}
-+        )
-+
-+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-+        ac(
-+            mock_push_corosync.mock_calls[0][1][0].config.export(),
-+            original_conf.replace(
-+                "provider: corosync_votequorum\n",
-+                """provider: corosync_votequorum
-+
-+    device {
-+        timeout: 12345
-+        model: net
-+        votes: 1
- 
-         net {
-             algorithm: ffsplit
-@@ -321,9 +473,15 @@ class AddDeviceTest(TestCase, CmanMixin):
-             )
-         )
-         self.assertEqual([], self.mock_reporter.report_item_list)
-+        mock_add_net.assert_not_called()
-+        mock_client_enable.assert_not_called()
-+        mock_client_start.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_invalid_options(self, mock_get_corosync, mock_push_corosync):
-+    def test_invalid_options(
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -349,9 +507,15 @@ class AddDeviceTest(TestCase, CmanMixin):
- 
-         self.assertEqual(1, mock_get_corosync.call_count)
-         self.assertEqual(0, mock_push_corosync.call_count)
-+        mock_add_net.assert_not_called()
-+        mock_client_enable.assert_not_called()
-+        mock_client_start.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_invalid_options_forced(self, mock_get_corosync, mock_push_corosync):
-+    def test_invalid_options_forced(
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -375,7 +539,21 @@ class AddDeviceTest(TestCase, CmanMixin):
-                         "option_type": "quorum device",
-                         "allowed": ["sync_timeout", "timeout"],
-                     }
--                )
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_ENABLE_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_START_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-             ]
-         )
-         self.assertEqual(1, mock_get_corosync.call_count)
-@@ -389,6 +567,7 @@ class AddDeviceTest(TestCase, CmanMixin):
-     device {
-         bad_option: bad_value
-         model: net
-+        votes: 1
- 
-         net {
-             algorithm: ffsplit
-@@ -398,9 +577,15 @@ class AddDeviceTest(TestCase, CmanMixin):
- """
-             )
-         )
-+        self.assertEqual(1, len(mock_add_net.mock_calls))
-+        self.assertEqual(3, len(mock_client_enable.mock_calls))
-+        self.assertEqual(3, len(mock_client_start.mock_calls))
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_invalid_model(self, mock_get_corosync, mock_push_corosync):
-+    def test_invalid_model(
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -421,9 +606,15 @@ class AddDeviceTest(TestCase, CmanMixin):
- 
-         self.assertEqual(1, mock_get_corosync.call_count)
-         self.assertEqual(0, mock_push_corosync.call_count)
-+        mock_add_net.assert_not_called()
-+        mock_client_enable.assert_not_called()
-+        mock_client_start.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_invalid_model_forced(self, mock_get_corosync, mock_push_corosync):
-+    def test_invalid_model_forced(
-+        self, mock_client_start, mock_client_enable, mock_add_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -441,7 +632,21 @@ class AddDeviceTest(TestCase, CmanMixin):
-                         "option_value": "bad model",
-                         "allowed_values": ("net", ),
-                     },
--                )
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_ENABLE_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_START_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-             ]
-         )
-         self.assertEqual(1, mock_get_corosync.call_count)
-@@ -458,25 +663,678 @@ class AddDeviceTest(TestCase, CmanMixin):
- """
-             )
-         )
-+        mock_add_net.assert_not_called() # invalid model - don't setup net model
-+        self.assertEqual(3, len(mock_client_enable.mock_calls))
-+        self.assertEqual(3, len(mock_client_start.mock_calls))
-+
-+
-+@mock.patch(
-+    "pcs.lib.commands.quorum.qdevice_net.remote_client_import_certificate_and_key"
-+)
-+@mock.patch("pcs.lib.commands.quorum.qdevice_net.client_cert_request_to_pk12")
-+@mock.patch(
-+    "pcs.lib.commands.quorum.qdevice_net.remote_sign_certificate_request"
-+)
-+@mock.patch(
-+    "pcs.lib.commands.quorum.qdevice_net.client_generate_certificate_request"
-+)
-+@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_setup")
-+@mock.patch(
-+    "pcs.lib.commands.quorum.qdevice_net.remote_qdevice_get_ca_certificate"
-+)
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "node_communicator",
-+    lambda self: "mock_communicator"
-+)
-+class AddDeviceNetTest(TestCase):
-+    #pylint: disable=too-many-instance-attributes
-+    def setUp(self):
-+        self.mock_logger = mock.MagicMock(logging.Logger)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        self.qnetd_host = "qnetd_host"
-+        self.cluster_name = "clusterName"
-+        self.nodes = NodeAddressesList([
-+            NodeAddresses("node1"),
-+            NodeAddresses("node2"),
-+        ])
-+        self.ca_cert = "CA certificate"
-+        self.cert_request = "client certificate request"
-+        self.signed_cert = "signed certificate"
-+        self.final_cert = "final client certificate"
-+
-+    def test_success(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        skip_offline_nodes = False
-+
-+        lib._add_device_model_net(
-+            self.lib_env,
-+            self.qnetd_host,
-+            self.cluster_name,
-+            self.nodes,
-+            skip_offline_nodes
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-+                    {
-+                        "node": self.nodes[0].label
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-+                    {
-+                        "node": self.nodes[1].label
-+                    }
-+                ),
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_get_cert_request.assert_called_once_with(
-+            "mock_runner",
-+            self.cluster_name
-+        )
-+        mock_sign_cert_request.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host,
-+            self.cert_request,
-+            self.cluster_name
-+        )
-+        mock_cert_to_pk12.assert_called_once_with(
-+            "mock_runner",
-+            self.signed_cert
-+        )
-+        client_import_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.final_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.final_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_import_calls),
-+            len(mock_import_cert.mock_calls)
-+        )
-+        mock_import_cert.assert_has_calls(client_import_calls)
-+
-+    def test_error_get_ca_cert(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.side_effect = NodeCommunicationException(
-+            "host", "command", "reason"
-+        )
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        skip_offline_nodes = False
-+
-+        assert_raise_library_error(
-+            lambda: lib._add_device_model_net(
-+                self.lib_env,
-+                self.qnetd_host,
-+                self.cluster_name,
-+                self.nodes,
-+                skip_offline_nodes
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR,
-+                {}
-+            )
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                )
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        mock_client_setup.assert_not_called()
-+        mock_get_cert_request.assert_not_called()
-+        mock_sign_cert_request.assert_not_called()
-+        mock_cert_to_pk12.assert_not_called()
-+        mock_import_cert.assert_not_called()
-+
-+
-+    def test_error_client_setup(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        def raiser(communicator, node, cert):
-+            if node == self.nodes[1]:
-+                raise NodeCommunicationException("host", "command", "reason")
-+        mock_client_setup.side_effect = raiser
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        skip_offline_nodes = False
-+
-+        assert_raise_library_error(
-+            lambda: lib._add_device_model_net(
-+                self.lib_env,
-+                self.qnetd_host,
-+                self.cluster_name,
-+                self.nodes,
-+                skip_offline_nodes
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR,
-+                {},
-+                report_codes.SKIP_OFFLINE_NODES
-+            )
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.ERROR,
-+                    report_codes.NODE_COMMUNICATION_ERROR,
-+                    {},
-+                    report_codes.SKIP_OFFLINE_NODES
-+                ),
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+
-+    def test_error_client_setup_skip_offline(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        def raiser(communicator, node, cert):
-+            if node == self.nodes[1]:
-+                raise NodeCommunicationException("host", "command", "reason")
-+        mock_client_setup.side_effect = raiser
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        skip_offline_nodes = True
-+
-+        lib._add_device_model_net(
-+            self.lib_env,
-+            self.qnetd_host,
-+            self.cluster_name,
-+            self.nodes,
-+            skip_offline_nodes
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.WARNING,
-+                    report_codes.NODE_COMMUNICATION_ERROR,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-+                    {
-+                        "node": self.nodes[0].label
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-+                    {
-+                        "node": self.nodes[1].label
-+                    }
-+                ),
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+
-+    def test_generate_cert_request_error(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        mock_get_cert_request.side_effect = LibraryError()
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        skip_offline_nodes = False
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib._add_device_model_net(
-+                self.lib_env,
-+                self.qnetd_host,
-+                self.cluster_name,
-+                self.nodes,
-+                skip_offline_nodes
-+            )
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                )
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_get_cert_request.assert_called_once_with(
-+            "mock_runner",
-+            self.cluster_name
-+        )
-+        mock_sign_cert_request.assert_not_called()
-+        mock_cert_to_pk12.assert_not_called()
-+        mock_import_cert.assert_not_called()
-+
-+    def test_sign_certificate_error(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.side_effect = NodeCommunicationException(
-+            "host", "command", "reason"
-+        )
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        skip_offline_nodes = False
-+
-+        assert_raise_library_error(
-+            lambda: lib._add_device_model_net(
-+                self.lib_env,
-+                self.qnetd_host,
-+                self.cluster_name,
-+                self.nodes,
-+                skip_offline_nodes
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR,
-+                {}
-+            )
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                )
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_get_cert_request.assert_called_once_with(
-+            "mock_runner",
-+            self.cluster_name
-+        )
-+        mock_sign_cert_request.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host,
-+            self.cert_request,
-+            self.cluster_name
-+        )
-+        mock_cert_to_pk12.assert_not_called()
-+        mock_import_cert.assert_not_called()
-+
-+    def test_certificate_to_pk12_error(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.side_effect = LibraryError()
-+        skip_offline_nodes = False
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib._add_device_model_net(
-+                self.lib_env,
-+                self.qnetd_host,
-+                self.cluster_name,
-+                self.nodes,
-+                skip_offline_nodes
-+            )
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                )
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_get_cert_request.assert_called_once_with(
-+            "mock_runner",
-+            self.cluster_name
-+        )
-+        mock_sign_cert_request.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host,
-+            self.cert_request,
-+            self.cluster_name
-+        )
-+        mock_cert_to_pk12.assert_called_once_with(
-+            "mock_runner",
-+            self.signed_cert
-+        )
-+        mock_import_cert.assert_not_called()
-+
-+    def test_client_import_cert_error(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        def raiser(communicator, node, cert):
-+            if node == self.nodes[1]:
-+                raise NodeCommunicationException("host", "command", "reason")
-+        mock_import_cert.side_effect = raiser
-+        skip_offline_nodes = False
-+
-+        assert_raise_library_error(
-+            lambda: lib._add_device_model_net(
-+                self.lib_env,
-+                self.qnetd_host,
-+                self.cluster_name,
-+                self.nodes,
-+                skip_offline_nodes
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR,
-+                {},
-+                report_codes.SKIP_OFFLINE_NODES
-+            )
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-+                    {
-+                        "node": self.nodes[0].label
-+                    }
-+                ),
-+                (
-+                    severity.ERROR,
-+                    report_codes.NODE_COMMUNICATION_ERROR,
-+                    {},
-+                    report_codes.SKIP_OFFLINE_NODES
-+                ),
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_get_cert_request.assert_called_once_with(
-+            "mock_runner",
-+            self.cluster_name
-+        )
-+        mock_sign_cert_request.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host,
-+            self.cert_request,
-+            self.cluster_name
-+        )
-+        mock_cert_to_pk12.assert_called_once_with(
-+            "mock_runner",
-+            self.signed_cert
-+        )
-+        client_import_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.final_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.final_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_import_calls),
-+            len(mock_import_cert.mock_calls)
-+        )
-+        mock_import_cert.assert_has_calls(client_import_calls)
-+
-+    def test_client_import_cert_error_skip_offline(
-+        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-+        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-+    ):
-+        mock_get_ca.return_value = self.ca_cert
-+        mock_get_cert_request.return_value = self.cert_request
-+        mock_sign_cert_request.return_value = self.signed_cert
-+        mock_cert_to_pk12.return_value = self.final_cert
-+        def raiser(communicator, node, cert):
-+            if node == self.nodes[1]:
-+                raise NodeCommunicationException("host", "command", "reason")
-+        mock_import_cert.side_effect = raiser
-+        skip_offline_nodes = True
-+
-+        lib._add_device_model_net(
-+            self.lib_env,
-+            self.qnetd_host,
-+            self.cluster_name,
-+            self.nodes,
-+            skip_offline_nodes
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-+                    {
-+                        "node": self.nodes[0].label
-+                    }
-+                ),
-+                (
-+                    severity.WARNING,
-+                    report_codes.NODE_COMMUNICATION_ERROR,
-+                    {}
-+                ),
-+            ]
-+        )
-+        mock_get_ca.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host
-+        )
-+        client_setup_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_setup_calls),
-+            len(mock_client_setup.mock_calls)
-+        )
-+        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_get_cert_request.assert_called_once_with(
-+            "mock_runner",
-+            self.cluster_name
-+        )
-+        mock_sign_cert_request.assert_called_once_with(
-+            "mock_communicator",
-+            self.qnetd_host,
-+            self.cert_request,
-+            self.cluster_name
-+        )
-+        mock_cert_to_pk12.assert_called_once_with(
-+            "mock_runner",
-+            self.signed_cert
-+        )
-+        client_import_calls = [
-+            mock.call("mock_communicator", self.nodes[0], self.final_cert),
-+            mock.call("mock_communicator", self.nodes[1], self.final_cert),
-+        ]
-+        self.assertEqual(
-+            len(client_import_calls),
-+            len(mock_import_cert.mock_calls)
-+        )
-+        mock_import_cert.assert_has_calls(client_import_calls)
- 
- 
- @mock.patch.object(LibraryEnvironment, "push_corosync_conf")
- @mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
-+@mock.patch("pcs.lib.commands.quorum._remove_device_model_net")
-+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_disable")
-+@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_stop")
- class RemoveDeviceTest(TestCase, CmanMixin):
-     def setUp(self):
-         self.mock_logger = mock.MagicMock(logging.Logger)
-         self.mock_reporter = MockLibraryReportProcessor()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
--    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
-+    def test_disabled_on_cman(
-+        self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-         self.assert_disabled_on_cman(lambda: lib.remove_device(lib_env))
-         mock_get_corosync.assert_not_called()
-         mock_push_corosync.assert_not_called()
-+        mock_remove_net.assert_not_called()
-+        mock_remote_disable.assert_not_called()
-+        mock_remote_stop.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-     def test_enabled_on_cman_if_not_live(
--        self, mock_get_corosync, mock_push_corosync
-+        self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-+        mock_get_corosync, mock_push_corosync
-     ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-@@ -495,9 +1353,17 @@ class RemoveDeviceTest(TestCase, CmanMixin):
-             )
-         )
- 
-+        self.assertEqual(1, mock_get_corosync.call_count)
-+        self.assertEqual(0, mock_push_corosync.call_count)
-+        mock_remove_net.assert_not_called()
-+        mock_remote_disable.assert_not_called()
-+        mock_remote_stop.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_no_device(self, mock_get_corosync, mock_push_corosync):
-+    def test_no_device(
-+        self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -511,10 +1377,17 @@ class RemoveDeviceTest(TestCase, CmanMixin):
-             )
-         )
- 
--        mock_push_corosync.assert_not_called()
-+        self.assertEqual(1, mock_get_corosync.call_count)
-+        self.assertEqual(0, mock_push_corosync.call_count)
-+        mock_remove_net.assert_not_called()
-+        mock_remote_disable.assert_not_called()
-+        mock_remote_stop.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_success(self, mock_get_corosync, mock_push_corosync):
-+    def test_success(
-+        self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
-         no_device_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-@@ -527,7 +1400,213 @@ class RemoveDeviceTest(TestCase, CmanMixin):
-             mock_push_corosync.mock_calls[0][1][0].config.export(),
-             no_device_conf
-         )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_DISABLE_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_STOP_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+            ]
-+        )
-+        self.assertEqual(1, len(mock_remove_net.mock_calls))
-+        self.assertEqual(3, len(mock_remote_disable.mock_calls))
-+        self.assertEqual(3, len(mock_remote_stop.mock_calls))
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_success_file(
-+        self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-+        original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
-+        no_device_conf = open(rc("corosync-3nodes.conf")).read()
-+        mock_get_corosync.return_value = original_conf
-+        lib_env = LibraryEnvironment(
-+            self.mock_logger,
-+            self.mock_reporter,
-+            corosync_conf_data=original_conf
-+        )
-+
-+        lib.remove_device(lib_env)
-+
-+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-+        ac(
-+            mock_push_corosync.mock_calls[0][1][0].config.export(),
-+            no_device_conf
-+        )
-         self.assertEqual([], self.mock_reporter.report_item_list)
-+        mock_remove_net.assert_not_called()
-+        mock_remote_disable.assert_not_called()
-+        mock_remote_stop.assert_not_called()
-+
-+
-+@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy")
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "node_communicator",
-+    lambda self: "mock_communicator"
-+)
-+class RemoveDeviceNetTest(TestCase):
-+    def setUp(self):
-+        self.mock_logger = mock.MagicMock(logging.Logger)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        self.nodes = NodeAddressesList([
-+            NodeAddresses("node1"),
-+            NodeAddresses("node2"),
-+        ])
-+
-+    def test_success(self, mock_client_destroy):
-+        skip_offline_nodes = False
-+
-+        lib._remove_device_model_net(
-+            self.lib_env,
-+            self.nodes,
-+            skip_offline_nodes
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
-+                    {
-+                        "node": self.nodes[0].label
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
-+                    {
-+                        "node": self.nodes[1].label
-+                    }
-+                ),
-+            ]
-+        )
-+        client_destroy_calls = [
-+            mock.call("mock_communicator", self.nodes[0]),
-+            mock.call("mock_communicator", self.nodes[1]),
-+        ]
-+        self.assertEqual(
-+            len(client_destroy_calls),
-+            len(mock_client_destroy.mock_calls)
-+        )
-+        mock_client_destroy.assert_has_calls(client_destroy_calls)
-+
-+    def test_error_client_destroy(self, mock_client_destroy):
-+        def raiser(communicator, node):
-+            if node == self.nodes[1]:
-+                raise NodeCommunicationException("host", "command", "reason")
-+        mock_client_destroy.side_effect = raiser
-+        skip_offline_nodes = False
-+
-+        assert_raise_library_error(
-+            lambda: lib._remove_device_model_net(
-+                self.lib_env,
-+                self.nodes,
-+                skip_offline_nodes
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR,
-+                {},
-+                report_codes.SKIP_OFFLINE_NODES
-+            )
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
-+                    {
-+                        "node": self.nodes[0].label
-+                    }
-+                ),
-+                (
-+                    severity.ERROR,
-+                    report_codes.NODE_COMMUNICATION_ERROR,
-+                    {},
-+                    report_codes.SKIP_OFFLINE_NODES
-+                ),
-+            ]
-+        )
-+        client_destroy_calls = [
-+            mock.call("mock_communicator", self.nodes[0]),
-+            mock.call("mock_communicator", self.nodes[1]),
-+        ]
-+        self.assertEqual(
-+            len(client_destroy_calls),
-+            len(mock_client_destroy.mock_calls)
-+        )
-+        mock_client_destroy.assert_has_calls(client_destroy_calls)
-+
-+    def test_error_client_destroy_skip_offline(self, mock_client_destroy):
-+        def raiser(communicator, node):
-+            if node == self.nodes[1]:
-+                raise NodeCommunicationException("host", "command", "reason")
-+        mock_client_destroy.side_effect = raiser
-+        skip_offline_nodes = True
-+
-+        lib._remove_device_model_net(
-+            self.lib_env,
-+            self.nodes,
-+            skip_offline_nodes
-+        )
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
-+                    {
-+                        "node": self.nodes[0].label
-+                    }
-+                ),
-+                (
-+                    severity.WARNING,
-+                    report_codes.NODE_COMMUNICATION_ERROR,
-+                    {}
-+                ),
-+            ]
-+        )
-+        client_destroy_calls = [
-+            mock.call("mock_communicator", self.nodes[0]),
-+            mock.call("mock_communicator", self.nodes[1]),
-+        ]
-+        self.assertEqual(
-+            len(client_destroy_calls),
-+            len(mock_client_destroy.mock_calls)
-+        )
-+        mock_client_destroy.assert_has_calls(client_destroy_calls)
- 
- 
- @mock.patch.object(LibraryEnvironment, "push_corosync_conf")
-diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
-index 5700016..4a35fd9 100644
---- a/pcs/test/test_lib_corosync_config_facade.py
-+++ b/pcs/test/test_lib_corosync_config_facade.py
-@@ -31,6 +31,7 @@ class FromStringTest(TestCase):
-         self.assertEqual(facade.__class__, lib.ConfigFacade)
-         self.assertEqual(facade.config.export(), config)
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_parse_error_missing_brace(self):
-         config = "section {"
-@@ -55,6 +56,43 @@ class FromStringTest(TestCase):
-         )
- 
- 
-+class GetClusterNametest(TestCase):
-+    def test_no_name(self):
-+        config = ""
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertEqual("", facade.get_cluster_name())
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-+
-+    def test_empty_name(self):
-+        config = "totem {\n cluster_name:\n}\n"
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertEqual("", facade.get_cluster_name())
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-+
-+    def test_one_name(self):
-+        config = "totem {\n cluster_name: test\n}\n"
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertEqual("test", facade.get_cluster_name())
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-+
-+    def test_more_names(self):
-+        config = "totem {\n cluster_name: test\n cluster_name: TEST\n}\n"
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertEqual("TEST", facade.get_cluster_name())
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-+
-+    def test_more_sections(self):
-+        config = "totem{\ncluster_name:test\n}\ntotem{\ncluster_name:TEST\n}\n"
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertEqual("TEST", facade.get_cluster_name())
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-+
-+
- class GetNodesTest(TestCase):
-     def assert_equal_nodelist(self, expected_nodes, real_nodelist):
-         real_nodes = [
-@@ -69,6 +107,7 @@ class GetNodesTest(TestCase):
-         nodes = facade.get_nodes()
-         self.assertEqual(0, len(nodes))
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_empty_nodelist(self):
-         config = """\
-@@ -79,6 +118,7 @@ nodelist {
-         nodes = facade.get_nodes()
-         self.assertEqual(0, len(nodes))
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_one_nodelist(self):
-         config = """\
-@@ -107,6 +147,7 @@ nodelist {
-             nodes
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_more_nodelists(self):
-         config = """\
-@@ -137,6 +178,7 @@ nodelist {
-             nodes
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
- 
- class GetQuorumOptionsTest(TestCase):
-@@ -146,6 +188,7 @@ class GetQuorumOptionsTest(TestCase):
-         options = facade.get_quorum_options()
-         self.assertEqual({}, options)
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_empty_quorum(self):
-         config = """\
-@@ -156,6 +199,7 @@ quorum {
-         options = facade.get_quorum_options()
-         self.assertEqual({}, options)
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_no_options(self):
-         config = """\
-@@ -167,6 +211,7 @@ quorum {
-         options = facade.get_quorum_options()
-         self.assertEqual({}, options)
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_some_options(self):
-         config = """\
-@@ -191,6 +236,7 @@ quorum {
-             options
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_option_repeated(self):
-         config = """\
-@@ -208,6 +254,7 @@ quorum {
-             options
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_quorum_repeated(self):
-         config = """\
-@@ -231,6 +278,7 @@ quorum {
-             options
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
- 
- class SetQuorumOptionsTest(TestCase):
-@@ -247,6 +295,7 @@ class SetQuorumOptionsTest(TestCase):
-         facade = lib.ConfigFacade.from_string(config)
-         facade.set_quorum_options(reporter, {"wait_for_all": "0"})
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             """\
- quorum {
-@@ -263,6 +312,7 @@ quorum {
-         facade = lib.ConfigFacade.from_string(config)
-         facade.set_quorum_options(reporter, {"wait_for_all": ""})
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual("", facade.config.export())
-         self.assertEqual([], reporter.report_item_list)
- 
-@@ -279,6 +329,7 @@ quorum {
-         facade.set_quorum_options(reporter, expected_options)
- 
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         test_facade = lib.ConfigFacade.from_string(facade.config.export())
-         self.assertEqual(
-             expected_options,
-@@ -309,6 +360,7 @@ quorum {
-         )
- 
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         test_facade = lib.ConfigFacade.from_string(facade.config.export())
-         self.assertEqual(
-             {
-@@ -329,6 +381,7 @@ quorum {
-         facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"})
- 
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             "1",
-             facade.get_quorum_options().get("auto_tie_breaker", None)
-@@ -347,6 +400,7 @@ quorum {
-         facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"})
- 
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             "0",
-             facade.get_quorum_options().get("auto_tie_breaker", None)
-@@ -365,6 +419,7 @@ quorum {
-         facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"})
- 
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             "1",
-             facade.get_quorum_options().get("auto_tie_breaker", None)
-@@ -383,6 +438,7 @@ quorum {
-         facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"})
- 
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             "0",
-             facade.get_quorum_options().get("auto_tie_breaker", None)
-@@ -421,6 +477,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             lib.ConfigFacade.from_string(config).get_quorum_options(),
-             facade.get_quorum_options()
-@@ -476,6 +533,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             lib.ConfigFacade.from_string(config).get_quorum_options(),
-             facade.get_quorum_options()
-@@ -522,11 +580,60 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-+        self.assertEqual(
-+            lib.ConfigFacade.from_string(config).get_quorum_options(),
-+            facade.get_quorum_options()
-+        )
-+
-+    def test_qdevice_incompatible_options(self):
-+        config = open(rc("corosync-3nodes-qdevice.conf")).read()
-+        reporter = MockLibraryReportProcessor()
-+        facade = lib.ConfigFacade.from_string(config)
-+        options = {
-+            "auto_tie_breaker": "1",
-+            "last_man_standing": "1",
-+            "last_man_standing_window": "250",
-+        }
-+        assert_raise_library_error(
-+            lambda: facade.set_quorum_options(reporter, options),
-+            (
-+                severity.ERROR,
-+                report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE,
-+                {
-+                    "options_names": [
-+                        "auto_tie_breaker",
-+                        "last_man_standing",
-+                        "last_man_standing_window",
-+                    ],
-+                }
-+            )
-+        )
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual(
-             lib.ConfigFacade.from_string(config).get_quorum_options(),
-             facade.get_quorum_options()
-         )
- 
-+    def test_qdevice_compatible_options(self):
-+        config = open(rc("corosync-3nodes-qdevice.conf")).read()
-+        reporter = MockLibraryReportProcessor()
-+        facade = lib.ConfigFacade.from_string(config)
-+        expected_options = {
-+            "wait_for_all": "1",
-+        }
-+        facade.set_quorum_options(reporter, expected_options)
-+
-+        self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-+        test_facade = lib.ConfigFacade.from_string(facade.config.export())
-+        self.assertEqual(
-+            expected_options,
-+            test_facade.get_quorum_options()
-+        )
-+        self.assertEqual([], reporter.report_item_list)
-+
- 
- class HasQuorumDeviceTest(TestCase):
-     def test_empty_config(self):
-@@ -534,12 +641,14 @@ class HasQuorumDeviceTest(TestCase):
-         facade = lib.ConfigFacade.from_string(config)
-         self.assertFalse(facade.has_quorum_device())
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_no_device(self):
-         config = open(rc("corosync.conf")).read()
-         facade = lib.ConfigFacade.from_string(config)
-         self.assertFalse(facade.has_quorum_device())
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_empty_device(self):
-         config = """\
-@@ -551,6 +660,7 @@ quorum {
-         facade = lib.ConfigFacade.from_string(config)
-         self.assertFalse(facade.has_quorum_device())
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_device_set(self):
-         config = """\
-@@ -563,6 +673,7 @@ quorum {
-         facade = lib.ConfigFacade.from_string(config)
-         self.assertTrue(facade.has_quorum_device())
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_no_model(self):
-         config = """\
-@@ -578,6 +689,7 @@ quorum {
-         facade = lib.ConfigFacade.from_string(config)
-         self.assertFalse(facade.has_quorum_device())
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
- 
- class GetQuorumDeviceSettingsTest(TestCase):
-@@ -589,6 +701,7 @@ class GetQuorumDeviceSettingsTest(TestCase):
-             facade.get_quorum_device_settings()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_no_device(self):
-         config = open(rc("corosync.conf")).read()
-@@ -598,6 +711,7 @@ class GetQuorumDeviceSettingsTest(TestCase):
-             facade.get_quorum_device_settings()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_empty_device(self):
-         config = """\
-@@ -612,6 +726,7 @@ quorum {
-             facade.get_quorum_device_settings()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_no_model(self):
-         config = """\
-@@ -630,6 +745,7 @@ quorum {
-             facade.get_quorum_device_settings()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_configured_properly(self):
-         config = """\
-@@ -649,6 +765,7 @@ quorum {
-             facade.get_quorum_device_settings()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_more_devices_one_quorum(self):
-         config = """\
-@@ -681,6 +798,7 @@ quorum {
-             facade.get_quorum_device_settings()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_more_devices_more_quorum(self):
-         config = """\
-@@ -715,6 +833,7 @@ quorum {
-             facade.get_quorum_device_settings()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
- 
- class AddQuorumDeviceTest(TestCase):
-@@ -754,9 +873,10 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
--    def test_success_net_minimal(self):
-+    def test_success_net_minimal_ffsplit(self):
-         config = open(rc("corosync-3nodes.conf")).read()
-         reporter = MockLibraryReportProcessor()
-         facade = lib.ConfigFacade.from_string(config)
-@@ -774,6 +894,7 @@ quorum {
- 
-     device {
-         model: net
-+        votes: 1
- 
-         net {
-             algorithm: ffsplit
-@@ -784,55 +905,10 @@ quorum {
-             facade.config.export()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual([], reporter.report_item_list)
- 
--    def test_success_net_full(self):
--        config = open(rc("corosync-3nodes.conf")).read()
--        reporter = MockLibraryReportProcessor()
--        facade = lib.ConfigFacade.from_string(config)
--        facade.add_quorum_device(
--            reporter,
--            "net",
--            {
--                "host": "127.0.0.1",
--                "port": "4433",
--                "algorithm": "ffsplit",
--                "connect_timeout": "12345",
--                "force_ip_version": "4",
--                "tie_breaker": "lowest",
--            },
--            {
--                "timeout": "23456",
--                "sync_timeout": "34567"
--            }
--        )
--        ac(
--            config.replace(
--                "    provider: corosync_votequorum",
--                """\
--    provider: corosync_votequorum
--
--    device {
--        sync_timeout: 34567
--        timeout: 23456
--        model: net
--
--        net {
--            algorithm: ffsplit
--            connect_timeout: 12345
--            force_ip_version: 4
--            host: 127.0.0.1
--            port: 4433
--            tie_breaker: lowest
--        }
--    }"""
--            ),
--            facade.config.export()
--        )
--        self.assertFalse(facade.need_stopped_cluster)
--        self.assertEqual([], reporter.report_item_list)
--
--    def test_succes_net_lms_3node(self):
-+    def test_success_net_minimal_lms(self):
-         config = open(rc("corosync-3nodes.conf")).read()
-         reporter = MockLibraryReportProcessor()
-         facade = lib.ConfigFacade.from_string(config)
-@@ -860,16 +936,18 @@ quorum {
-             facade.config.export()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual([], reporter.report_item_list)
- 
--    def test_succes_net_2nodelms_3node(self):
-+    def test_success_remove_nodes_votes(self):
-         config = open(rc("corosync-3nodes.conf")).read()
-+        config_votes = config.replace("node {", "node {\nquorum_votes: 2")
-         reporter = MockLibraryReportProcessor()
--        facade = lib.ConfigFacade.from_string(config)
-+        facade = lib.ConfigFacade.from_string(config_votes)
-         facade.add_quorum_device(
-             reporter,
-             "net",
--            {"host": "127.0.0.1", "algorithm": "2nodelms"},
-+            {"host": "127.0.0.1", "algorithm": "lms"},
-             {}
-         )
-         ac(
-@@ -890,47 +968,28 @@ quorum {
-             facade.config.export()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual([], reporter.report_item_list)
- 
--    def test_succes_net_lms_2node(self):
--        config = open(rc("corosync.conf")).read()
--        reporter = MockLibraryReportProcessor()
--        facade = lib.ConfigFacade.from_string(config)
--        facade.add_quorum_device(
--            reporter,
--            "net",
--            {"host": "127.0.0.1", "algorithm": "lms"},
--            {}
--        )
--        ac(
--            config.replace(
--                "    provider: corosync_votequorum",
--                """\
--    provider: corosync_votequorum
--
--    device {
--        model: net
--
--        net {
--            algorithm: 2nodelms
--            host: 127.0.0.1
--        }
--    }"""
--            ).replace("    two_node: 1\n", ""),
--            facade.config.export()
--        )
--        self.assertFalse(facade.need_stopped_cluster)
--        self.assertEqual([], reporter.report_item_list)
--
--    def test_succes_net_2nodelms_2node(self):
--        config = open(rc("corosync.conf")).read()
-+    def test_success_net_full(self):
-+        config = open(rc("corosync-3nodes.conf")).read()
-         reporter = MockLibraryReportProcessor()
-         facade = lib.ConfigFacade.from_string(config)
-         facade.add_quorum_device(
-             reporter,
-             "net",
--            {"host": "127.0.0.1", "algorithm": "2nodelms"},
--            {}
-+            {
-+                "host": "127.0.0.1",
-+                "port": "4433",
-+                "algorithm": "ffsplit",
-+                "connect_timeout": "12345",
-+                "force_ip_version": "4",
-+                "tie_breaker": "lowest",
-+            },
-+            {
-+                "timeout": "23456",
-+                "sync_timeout": "34567"
-+            }
-         )
-         ac(
-             config.replace(
-@@ -939,17 +998,25 @@ quorum {
-     provider: corosync_votequorum
- 
-     device {
-+        sync_timeout: 34567
-+        timeout: 23456
-         model: net
-+        votes: 1
- 
-         net {
--            algorithm: 2nodelms
-+            algorithm: ffsplit
-+            connect_timeout: 12345
-+            force_ip_version: 4
-             host: 127.0.0.1
-+            port: 4433
-+            tie_breaker: lowest
-         }
-     }"""
--            ).replace("    two_node: 1\n", ""),
-+            ),
-             facade.config.export()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual([], reporter.report_item_list)
- 
-     def test_remove_conflicting_options(self):
-@@ -982,6 +1049,7 @@ quorum {
- 
-     device {
-         model: net
-+        votes: 1
- 
-         net {
-             algorithm: ffsplit
-@@ -994,6 +1062,7 @@ quorum {
-             facade.config.export()
-         )
-         self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual([], reporter.report_item_list)
- 
-     def test_remove_old_configuration(self):
-@@ -1030,6 +1099,7 @@ quorum {
- 
-     device {
-         model: net
-+        votes: 1
- 
-         net {
-             algorithm: ffsplit
-@@ -1042,6 +1112,7 @@ quorum {
-             facade.config.export()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         self.assertEqual([], reporter.report_item_list)
- 
-     def test_bad_model(self):
-@@ -1062,6 +1133,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_bad_model_forced(self):
-@@ -1082,6 +1154,7 @@ quorum {
-             facade.config.export()
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         assert_report_item_list_equal(
-             reporter.report_item_list,
-             [
-@@ -1115,6 +1188,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_bad_options_net(self):
-@@ -1147,7 +1221,7 @@ quorum {
-                 {
-                     "option_name": "algorithm",
-                     "option_value": "bad algorithm",
--                    "allowed_values": ("2nodelms", "ffsplit", "lms"),
-+                    "allowed_values": ("ffsplit", "lms"),
-                 },
-                 report_codes.FORCE_OPTIONS
-             ),
-@@ -1254,6 +1328,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_mandatory_options_missing_net_forced(self):
-@@ -1277,6 +1352,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_mandatory_options_empty_net_forced(self):
-@@ -1300,6 +1376,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_bad_options_net_forced(self):
-@@ -1326,6 +1403,7 @@ quorum {
-             force_options=True
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(
-             config.replace(
-                 "    provider: corosync_votequorum",
-@@ -1360,7 +1438,7 @@ quorum {
-                     {
-                         "option_name": "algorithm",
-                         "option_value": "bad algorithm",
--                        "allowed_values": ("2nodelms", "ffsplit", "lms"),
-+                        "allowed_values": ("ffsplit", "lms"),
-                     }
-                 ),
-                 (
-@@ -1445,9 +1523,52 @@ quorum {
-             ]
-         )
- 
-+    def test_bad_options_net_disallowed_algorithms(self):
-+        config = open(rc("corosync-3nodes.conf")).read()
-+        reporter = MockLibraryReportProcessor()
-+        facade = lib.ConfigFacade.from_string(config)
-+        assert_raise_library_error(
-+            lambda: facade.add_quorum_device(
-+                reporter,
-+                "net",
-+                {"host": "127.0.0.1", "algorithm": "test"},
-+                {}
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_OPTION_VALUE,
-+                {
-+                    "option_name": "algorithm",
-+                    "option_value": "test",
-+                    "allowed_values": ("ffsplit", "lms"),
-+                },
-+                report_codes.FORCE_OPTIONS
-+            )
-+        )
-+
-+        assert_raise_library_error(
-+            lambda: facade.add_quorum_device(
-+                reporter,
-+                "net",
-+                {"host": "127.0.0.1", "algorithm": "2nodelms"},
-+                {}
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_OPTION_VALUE,
-+                {
-+                    "option_name": "algorithm",
-+                    "option_value": "2nodelms",
-+                    "allowed_values": ("ffsplit", "lms"),
-+                },
-+                report_codes.FORCE_OPTIONS
-+            )
-+        )
-+
-+
- class UpdateQuorumDeviceTest(TestCase):
--    def fixture_add_device(self, config):
--        return re.sub(
-+    def fixture_add_device(self, config, votes=None):
-+        with_device = re.sub(
-             re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
-             """\
- quorum {
-@@ -1465,6 +1586,12 @@ quorum {
- }""",
-             config
-         )
-+        if votes:
-+            with_device = with_device.replace(
-+                "model: net",
-+                "model: net\n        votes: {0}".format(votes)
-+            )
-+        return with_device
- 
-     def test_not_existing(self):
-         config = open(rc("corosync.conf")).read()
-@@ -1483,11 +1610,13 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_success_model_options_net(self):
-         config = self.fixture_add_device(
--            open(rc("corosync-3nodes.conf")).read()
-+            open(rc("corosync-3nodes.conf")).read(),
-+            votes="1"
-         )
-         reporter = MockLibraryReportProcessor()
-         facade = lib.ConfigFacade.from_string(config)
-@@ -1496,7 +1625,8 @@ quorum {
-             {"host": "127.0.0.2", "port": "", "algorithm": "ffsplit"},
-             {}
-         )
--        self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertTrue(facade.need_qdevice_reload)
-         ac(
-             config.replace(
-                 "host: 127.0.0.1\n            port: 4433",
-@@ -1506,27 +1636,6 @@ quorum {
-         )
-         self.assertEqual([], reporter.report_item_list)
- 
--    def test_success_net_3node_2nodelms(self):
--        config = self.fixture_add_device(
--            open(rc("corosync-3nodes.conf")).read()
--        )
--        reporter = MockLibraryReportProcessor()
--        facade = lib.ConfigFacade.from_string(config)
--        facade.update_quorum_device(
--            reporter,
--            {"algorithm": "2nodelms"},
--            {}
--        )
--        self.assertTrue(facade.need_stopped_cluster)
--        ac(
--            config.replace(
--                "port: 4433",
--                "port: 4433\n            algorithm: lms"
--            ),
--            facade.config.export()
--        )
--        self.assertEqual([], reporter.report_item_list)
--
-     def test_success_net_doesnt_require_host_and_algorithm(self):
-         config = self.fixture_add_device(
-             open(rc("corosync-3nodes.conf")).read()
-@@ -1534,7 +1643,8 @@ quorum {
-         reporter = MockLibraryReportProcessor()
-         facade = lib.ConfigFacade.from_string(config)
-         facade.update_quorum_device(reporter, {"port": "4444"}, {})
--        self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertTrue(facade.need_qdevice_reload)
-         ac(
-             config.replace(
-                 "host: 127.0.0.1\n            port: 4433",
-@@ -1572,12 +1682,13 @@ quorum {
-                 {
-                     "option_name": "algorithm",
-                     "option_value": "",
--                    "allowed_values": ("2nodelms", "ffsplit", "lms")
-+                    "allowed_values": ("ffsplit", "lms")
-                 },
-                 report_codes.FORCE_OPTIONS
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_net_required_options_cannot_be_removed_forced(self):
-@@ -1605,6 +1716,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_bad_net_options(self):
-@@ -1632,7 +1744,7 @@ quorum {
-                 {
-                     "option_name": "algorithm",
-                     "option_value": "bad algorithm",
--                    "allowed_values": ("2nodelms", "ffsplit", "lms"),
-+                    "allowed_values": ("ffsplit", "lms"),
-                 },
-                 report_codes.FORCE_OPTIONS
-             ),
-@@ -1695,6 +1807,7 @@ quorum {
-             ),
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_bad_net_options_forced(self):
-@@ -1716,7 +1829,8 @@ quorum {
-             {},
-             force_options=True
-         )
--        self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertTrue(facade.need_qdevice_reload)
-         ac(
-             config.replace(
-                 "            host: 127.0.0.1\n            port: 4433",
-@@ -1740,7 +1854,7 @@ quorum {
-                     {
-                         "option_name": "algorithm",
-                         "option_value": "bad algorithm",
--                        "allowed_values": ("2nodelms", "ffsplit", "lms"),
-+                        "allowed_values": ("ffsplit", "lms"),
-                     },
-                 ),
-                 (
-@@ -1809,7 +1923,8 @@ quorum {
-             {},
-             {"timeout": "", "sync_timeout": "23456"}
-         )
--        self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertTrue(facade.need_qdevice_reload)
-         ac(
-             config.replace(
-                 "timeout: 12345\n        model: net",
-@@ -1830,7 +1945,8 @@ quorum {
-             {"port": "4444"},
-             {"timeout": "23456"}
-         )
--        self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertTrue(facade.need_qdevice_reload)
-         ac(
-             config
-                 .replace("port: 4433", "port: 4444")
-@@ -1898,6 +2014,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_bad_generic_options_cannot_force_model(self):
-@@ -1924,6 +2041,7 @@ quorum {
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(config, facade.config.export())
- 
-     def test_bad_generic_options_forced(self):
-@@ -1942,7 +2060,8 @@ quorum {
-             },
-             force_options=True
-         )
--        self.assertTrue(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_stopped_cluster)
-+        self.assertTrue(facade.need_qdevice_reload)
-         ac(
-             config.replace(
-                 "        timeout: 12345\n        model: net",
-@@ -2001,6 +2120,7 @@ class RemoveQuorumDeviceTest(TestCase):
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_no_device(self):
-         config = open(rc("corosync-3nodes.conf")).read()
-@@ -2014,6 +2134,7 @@ class RemoveQuorumDeviceTest(TestCase):
-             )
-         )
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
- 
-     def test_remove_all_devices(self):
-         config_no_devices = open(rc("corosync-3nodes.conf")).read()
-@@ -2054,6 +2175,7 @@ quorum {
-         facade = lib.ConfigFacade.from_string(config)
-         facade.remove_quorum_device()
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(
-             config_no_devices,
-             facade.config.export()
-@@ -2082,6 +2204,7 @@ quorum {
-         facade = lib.ConfigFacade.from_string(config)
-         facade.remove_quorum_device()
-         self.assertFalse(facade.need_stopped_cluster)
-+        self.assertFalse(facade.need_qdevice_reload)
-         ac(
-             config_no_devices,
-             facade.config.export()
-diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
-index 4878136..96fe235 100644
---- a/pcs/test/test_lib_corosync_live.py
-+++ b/pcs/test/test_lib_corosync_live.py
-@@ -47,6 +47,22 @@ class GetLocalCorosyncConfTest(TestCase):
-         )
- 
- 
-+class SetRemoteCorosyncConfTest(TestCase):
-+    def test_success(self):
-+        config = "test {\nconfig: data\n}\n"
-+        node = NodeAddresses("node1")
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_node.return_value = "dummy return"
-+
-+        lib.set_remote_corosync_conf(mock_communicator, node, config)
-+
-+        mock_communicator.call_node.assert_called_once_with(
-+            node,
-+            "remote/set_corosync_conf",
-+            "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A"
-+        )
-+
-+
- class ReloadConfigTest(TestCase):
-     def path(self, name):
-         return os.path.join(settings.corosync_binaries, name)
-@@ -85,17 +101,43 @@ class ReloadConfigTest(TestCase):
-         ])
- 
- 
--class SetRemoteCorosyncConfTest(TestCase):
-+class GetQuorumStatusTextTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        self.quorum_tool = "/usr/sbin/corosync-quorumtool"
-+
-     def test_success(self):
--        config = "test {\nconfig: data\n}\n"
--        node = NodeAddresses("node1")
--        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
--        mock_communicator.call_node.return_value = "dummy return"
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.get_quorum_status_text(self.mock_runner)
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            self.quorum_tool, "-p"
-+        ])
- 
--        lib.set_remote_corosync_conf(mock_communicator, node, config)
-+    def test_success_with_retval_1(self):
-+        self.mock_runner.run.return_value = ("status info", 1)
-+        self.assertEqual(
-+            "status info",
-+            lib.get_quorum_status_text(self.mock_runner)
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            self.quorum_tool, "-p"
-+        ])
- 
--        mock_communicator.call_node.assert_called_once_with(
--            node,
--            "remote/set_corosync_conf",
--            "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A"
-+    def test_error(self):
-+        self.mock_runner.run.return_value = ("status error", 2)
-+        assert_raise_library_error(
-+            lambda: lib.get_quorum_status_text(self.mock_runner),
-+            (
-+                severity.ERROR,
-+                report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
-+                {
-+                    "reason": "status error",
-+                }
-+            )
-         )
-+        self.mock_runner.run.assert_called_once_with([
-+            self.quorum_tool, "-p"
-+        ])
-diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py
-new file mode 100644
-index 0000000..e0332f1
---- /dev/null
-+++ b/pcs/test/test_lib_corosync_qdevice_client.py
-@@ -0,0 +1,60 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.assertions import assert_raise_library_error
-+
-+from pcs.common import report_codes
-+from pcs.lib.errors import ReportItemSeverity as severity
-+from pcs.lib.external import CommandRunner
-+
-+import pcs.lib.corosync.qdevice_client as lib
-+
-+
-+class GetStatusTextTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        self.qdevice_tool = "/usr/sbin/corosync-qdevice-tool"
-+
-+    def test_success(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.get_status_text(self.mock_runner)
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            self.qdevice_tool, "-s"
-+        ])
-+
-+    def test_success_verbose(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.get_status_text(self.mock_runner, True)
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            self.qdevice_tool, "-s", "-v"
-+        ])
-+
-+    def test_error(self):
-+        self.mock_runner.run.return_value = ("status error", 1)
-+        assert_raise_library_error(
-+            lambda: lib.get_status_text(self.mock_runner),
-+            (
-+                severity.ERROR,
-+                report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
-+                {
-+                    "reason": "status error",
-+                }
-+            )
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            self.qdevice_tool, "-s"
-+        ])
-+
-diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
-index 38bc9c8..3d473f7 100644
---- a/pcs/test/test_lib_corosync_qdevice_net.py
-+++ b/pcs/test/test_lib_corosync_qdevice_net.py
-@@ -7,18 +7,40 @@ from __future__ import (
- 
- from unittest import TestCase
- 
-+import base64
-+import os.path
-+
- from pcs.test.tools.pcs_mock import mock
- from pcs.test.tools.assertions import assert_raise_library_error
-+from pcs.test.tools.misc import get_test_resource
- 
-+from pcs import settings
- from pcs.common import report_codes
--from pcs.lib.errors import ReportItemSeverity as severity
--from pcs.lib.external import CommandRunner
-+from pcs.lib import reports
-+from pcs.lib.errors import ReportItemSeverity as severity, LibraryError
-+from pcs.lib.external import (
-+    CommandRunner,
-+    NodeCommunicator,
-+    NodeCommunicationException,
-+)
- 
- import pcs.lib.corosync.qdevice_net as lib
- 
- 
--_qnetd_cert_dir = "/etc/corosync/qdevice/net/qnetd/nssdb"
--_qnetd_tool = "/usr/sbin/corosync-qnetd-certutil"
-+_qnetd_cert_dir = "/etc/corosync/qnetd/nssdb"
-+_qnetd_cert_tool = "/usr/bin/corosync-qnetd-certutil"
-+_qnetd_tool = "/usr/bin/corosync-qnetd-tool"
-+_client_cert_dir = "/etc/corosync/qdevice/net/nssdb"
-+_client_cert_tool = "/usr/sbin/corosync-qdevice-net-certutil"
-+
-+def cert_to_url(cert):
-+    return base64.b64encode(cert).decode("utf-8").replace("=", "%3D")
-+
-+class CertificateTestCase(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_tmpfile = mock.MagicMock()
-+        self.mock_tmpfile.name = "tmpfile path"
- 
- @mock.patch("pcs.lib.corosync.qdevice_net.external.is_dir_nonempty")
- class QdeviceSetupTest(TestCase):
-@@ -32,7 +54,7 @@ class QdeviceSetupTest(TestCase):
-         lib.qdevice_setup(self.mock_runner)
- 
-         mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
--        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"])
-+        self.mock_runner.run.assert_called_once_with([_qnetd_cert_tool, "-i"])
- 
-     def test_cert_db_exists(self, mock_is_dir_nonempty):
-         mock_is_dir_nonempty.return_value = True
-@@ -47,7 +69,7 @@ class QdeviceSetupTest(TestCase):
-         )
- 
-         mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
--        self.mock_runner.assert_not_called()
-+        self.mock_runner.run.assert_not_called()
- 
-     def test_init_tool_fail(self, mock_is_dir_nonempty):
-         mock_is_dir_nonempty.return_value = False
-@@ -66,16 +88,24 @@ class QdeviceSetupTest(TestCase):
-         )
- 
-         mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
--        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"])
-+        self.mock_runner.run.assert_called_once_with([_qnetd_cert_tool, "-i"])
- 
- 
- @mock.patch("pcs.lib.corosync.qdevice_net.shutil.rmtree")
-+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized")
- class QdeviceDestroyTest(TestCase):
--    def test_success(self, mock_rmtree):
-+    def test_success(self, mock_initialized, mock_rmtree):
-+        mock_initialized.return_value = True
-         lib.qdevice_destroy()
-         mock_rmtree.assert_called_once_with(_qnetd_cert_dir)
- 
--    def test_cert_dir_rm_error(self, mock_rmtree):
-+    def test_not_initialized(self, mock_initialized, mock_rmtree):
-+        mock_initialized.return_value = False
-+        lib.qdevice_destroy()
-+        mock_rmtree.assert_not_called()
-+
-+    def test_cert_dir_rm_error(self, mock_initialized, mock_rmtree):
-+        mock_initialized.return_value = True
-         mock_rmtree.side_effect = EnvironmentError("test errno", "test message")
-         assert_raise_library_error(
-             lib.qdevice_destroy,
-@@ -89,3 +119,920 @@ class QdeviceDestroyTest(TestCase):
-             )
-         )
-         mock_rmtree.assert_called_once_with(_qnetd_cert_dir)
-+
-+
-+class QdeviceStatusGenericTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+
-+    def test_success(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.qdevice_status_generic_text(self.mock_runner)
-+        )
-+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"])
-+
-+    def test_success_verbose(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.qdevice_status_generic_text(self.mock_runner, True)
-+        )
-+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s", "-v"])
-+
-+    def test_error(self):
-+        self.mock_runner.run.return_value = ("status error", 1)
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_status_generic_text(self.mock_runner),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_GET_STATUS_ERROR,
-+                {
-+                    "model": "net",
-+                    "reason": "status error",
-+                }
-+            )
-+        )
-+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"])
-+
-+
-+class QdeviceStatusClusterTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+
-+    def test_success(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.qdevice_status_cluster_text(self.mock_runner)
-+        )
-+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
-+
-+    def test_success_verbose(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.qdevice_status_cluster_text(self.mock_runner, verbose=True)
-+        )
-+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l", "-v"])
-+
-+    def test_success_cluster(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.qdevice_status_cluster_text(self.mock_runner, "cluster")
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _qnetd_tool, "-l", "-c", "cluster"
-+        ])
-+
-+    def test_success_cluster_verbose(self):
-+        self.mock_runner.run.return_value = ("status info", 0)
-+        self.assertEqual(
-+            "status info",
-+            lib.qdevice_status_cluster_text(self.mock_runner, "cluster", True)
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _qnetd_tool, "-l", "-v", "-c", "cluster"
-+        ])
-+
-+    def test_error(self):
-+        self.mock_runner.run.return_value = ("status error", 1)
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_status_cluster_text(self.mock_runner),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_GET_STATUS_ERROR,
-+                {
-+                    "model": "net",
-+                    "reason": "status error",
-+                }
-+            )
-+        )
-+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
-+
-+
-+@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
-+@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
-+class QdeviceSignCertificateRequestTest(CertificateTestCase):
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.qdevice_initialized",
-+        lambda: True
-+    )
-+    def test_success(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output", 0)
-+        mock_get_cert.return_value = "new certificate".encode("utf-8")
-+
-+        result = lib.qdevice_sign_certificate_request(
-+            self.mock_runner,
-+            "certificate request",
-+            "clusterName"
-+        )
-+        self.assertEqual(result, mock_get_cert.return_value)
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "certificate request",
-+            reports.qdevice_certificate_sign_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _qnetd_cert_tool,
-+            "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName"
-+        ])
-+        mock_get_cert.assert_called_once_with(
-+            "tool output",
-+            reports.qdevice_certificate_sign_error
-+        )
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.qdevice_initialized",
-+        lambda: False
-+    )
-+    def test_not_initialized(self, mock_tmp_store, mock_get_cert):
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_sign_certificate_request(
-+                self.mock_runner,
-+                "certificate request",
-+                "clusterName"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_NOT_INITIALIZED,
-+                {
-+                    "model": "net",
-+                }
-+            )
-+        )
-+        mock_tmp_store.assert_not_called()
-+        self.mock_runner.run.assert_not_called()
-+        mock_get_cert.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.qdevice_initialized",
-+        lambda: True
-+    )
-+    def test_input_write_error(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.side_effect = LibraryError
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib.qdevice_sign_certificate_request(
-+                self.mock_runner,
-+                "certificate request",
-+                "clusterName"
-+            )
-+        )
-+
-+        self.mock_runner.run.assert_not_called()
-+        mock_get_cert.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.qdevice_initialized",
-+        lambda: True
-+    )
-+    def test_sign_error(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output error", 1)
-+
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_sign_certificate_request(
-+                self.mock_runner,
-+                "certificate request",
-+                "clusterName"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR,
-+                {
-+                    "reason": "tool output error",
-+                }
-+            )
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "certificate request",
-+            reports.qdevice_certificate_sign_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _qnetd_cert_tool,
-+            "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName"
-+        ])
-+        mock_get_cert.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.qdevice_initialized",
-+        lambda: True
-+    )
-+    def test_output_read_error(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output", 0)
-+        mock_get_cert.side_effect = LibraryError
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib.qdevice_sign_certificate_request(
-+                self.mock_runner,
-+                "certificate request",
-+                "clusterName"
-+            )
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "certificate request",
-+            reports.qdevice_certificate_sign_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _qnetd_cert_tool,
-+            "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName"
-+        ])
-+        mock_get_cert.assert_called_once_with(
-+            "tool output",
-+            reports.qdevice_certificate_sign_error
-+        )
-+
-+
-+@mock.patch("pcs.lib.corosync.qdevice_net.shutil.rmtree")
-+@mock.patch("pcs.lib.corosync.qdevice_net.client_initialized")
-+class ClientDestroyTest(TestCase):
-+    def test_success(self, mock_initialized, mock_rmtree):
-+        mock_initialized.return_value = True
-+        lib.client_destroy()
-+        mock_rmtree.assert_called_once_with(_client_cert_dir)
-+
-+    def test_not_initialized(self, mock_initialized, mock_rmtree):
-+        mock_initialized.return_value = False
-+        lib.client_destroy()
-+        mock_rmtree.assert_not_called()
-+
-+    def test_cert_dir_rm_error(self, mock_initialized, mock_rmtree):
-+        mock_initialized.return_value = True
-+        mock_rmtree.side_effect = EnvironmentError("test errno", "test message")
-+        assert_raise_library_error(
-+            lib.client_destroy,
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_DESTROY_ERROR,
-+                {
-+                    "model": "net",
-+                    "reason": "test message",
-+                }
-+            )
-+        )
-+        mock_rmtree.assert_called_once_with(_client_cert_dir)
-+
-+
-+class ClientSetupTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        self.original_path = settings.corosync_qdevice_net_client_certs_dir
-+        settings.corosync_qdevice_net_client_certs_dir = get_test_resource(
-+            "qdevice-certs"
-+        )
-+        self.ca_file_path = os.path.join(
-+            settings.corosync_qdevice_net_client_certs_dir,
-+            settings.corosync_qdevice_net_client_ca_file_name
-+        )
-+
-+    def tearDown(self):
-+        settings.corosync_qdevice_net_client_certs_dir = self.original_path
-+
-+    @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
-+    def test_success(self, mock_destroy):
-+        self.mock_runner.run.return_value = ("tool output", 0)
-+
-+        lib.client_setup(self.mock_runner, "certificate data".encode("utf-8"))
-+
-+        self.assertEqual(
-+            "certificate data".encode("utf-8"),
-+            open(self.ca_file_path, "rb").read()
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-i", "-c", self.ca_file_path
-+        ])
-+        mock_destroy.assert_called_once_with()
-+
-+    @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
-+    def test_init_error(self, mock_destroy):
-+        self.mock_runner.run.return_value = ("tool output error", 1)
-+
-+        assert_raise_library_error(
-+            lambda: lib.client_setup(
-+                self.mock_runner,
-+                "certificate data".encode("utf-8")
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_INITIALIZATION_ERROR,
-+                {
-+                    "model": "net",
-+                    "reason": "tool output error",
-+                }
-+            )
-+        )
-+
-+        self.assertEqual(
-+            "certificate data".encode("utf-8"),
-+            open(self.ca_file_path, "rb").read()
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-i", "-c", self.ca_file_path
-+        ])
-+        mock_destroy.assert_called_once_with()
-+
-+
-+@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
-+class ClientGenerateCertificateRequestTest(CertificateTestCase):
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_success(self, mock_get_cert):
-+        self.mock_runner.run.return_value = ("tool output", 0)
-+        mock_get_cert.return_value = "new certificate".encode("utf-8")
-+
-+        result = lib.client_generate_certificate_request(
-+            self.mock_runner,
-+            "clusterName"
-+        )
-+        self.assertEqual(result, mock_get_cert.return_value)
-+
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-r", "-n", "clusterName"
-+        ])
-+        self.assertEqual(1, len(mock_get_cert.mock_calls))
-+        self.assertEqual(
-+            "tool output",
-+            mock_get_cert.call_args[0][0]
-+        )
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: False
-+    )
-+    def test_not_initialized(self, mock_get_cert):
-+        assert_raise_library_error(
-+            lambda: lib.client_generate_certificate_request(
-+                self.mock_runner,
-+                "clusterName"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_NOT_INITIALIZED,
-+                {
-+                    "model": "net",
-+                }
-+            )
-+        )
-+        self.mock_runner.run.assert_not_called()
-+        mock_get_cert.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_tool_error(self, mock_get_cert):
-+        self.mock_runner.run.return_value = ("tool output error", 1)
-+
-+        assert_raise_library_error(
-+            lambda: lib.client_generate_certificate_request(
-+                self.mock_runner,
-+                "clusterName"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_INITIALIZATION_ERROR,
-+                {
-+                    "model": "net",
-+                    "reason": "tool output error",
-+                }
-+            )
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-r", "-n", "clusterName"
-+        ])
-+        mock_get_cert.assert_not_called()
-+
-+
-+@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
-+@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
-+class ClientCertRequestToPk12Test(CertificateTestCase):
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_success(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output", 0)
-+        mock_get_cert.return_value = "new certificate".encode("utf-8")
-+
-+        result = lib.client_cert_request_to_pk12(
-+            self.mock_runner,
-+            "certificate request"
-+        )
-+        self.assertEqual(result, mock_get_cert.return_value)
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "certificate request",
-+            reports.qdevice_certificate_import_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-M", "-c", self.mock_tmpfile.name
-+        ])
-+        mock_get_cert.assert_called_once_with(
-+            "tool output",
-+            reports.qdevice_certificate_import_error
-+        )
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: False
-+    )
-+    def test_not_initialized(self, mock_tmp_store, mock_get_cert):
-+        assert_raise_library_error(
-+            lambda: lib.client_cert_request_to_pk12(
-+                self.mock_runner,
-+                "certificate request"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_NOT_INITIALIZED,
-+                {
-+                    "model": "net",
-+                }
-+            )
-+        )
-+        mock_tmp_store.assert_not_called()
-+        self.mock_runner.run.assert_not_called()
-+        mock_get_cert.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_input_write_error(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.side_effect = LibraryError
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib.client_cert_request_to_pk12(
-+                self.mock_runner,
-+                "certificate request"
-+            )
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "certificate request",
-+            reports.qdevice_certificate_import_error
-+        )
-+        self.mock_runner.run.assert_not_called()
-+        mock_get_cert.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_transform_error(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output error", 1)
-+
-+        assert_raise_library_error(
-+            lambda: lib.client_cert_request_to_pk12(
-+                self.mock_runner,
-+                "certificate request"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-+                {
-+                    "reason": "tool output error",
-+                }
-+            )
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "certificate request",
-+            reports.qdevice_certificate_import_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-M", "-c", self.mock_tmpfile.name
-+        ])
-+        mock_get_cert.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_output_read_error(self, mock_tmp_store, mock_get_cert):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output", 0)
-+        mock_get_cert.side_effect = LibraryError
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib.client_cert_request_to_pk12(
-+                self.mock_runner,
-+                "certificate request"
-+            )
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "certificate request",
-+            reports.qdevice_certificate_import_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-M", "-c", self.mock_tmpfile.name
-+        ])
-+        mock_get_cert.assert_called_once_with(
-+            "tool output",
-+            reports.qdevice_certificate_import_error
-+        )
-+
-+
-+@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
-+class ClientImportCertificateAndKeyTest(CertificateTestCase):
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_success(self, mock_tmp_store):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output", 0)
-+
-+        lib.client_import_certificate_and_key(
-+            self.mock_runner,
-+            "pk12 certificate"
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "pk12 certificate",
-+            reports.qdevice_certificate_import_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-m", "-c", self.mock_tmpfile.name
-+        ])
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: False
-+    )
-+    def test_not_initialized(self, mock_tmp_store):
-+        assert_raise_library_error(
-+            lambda: lib.client_import_certificate_and_key(
-+                self.mock_runner,
-+                "pk12 certificate"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_NOT_INITIALIZED,
-+                {
-+                    "model": "net",
-+                }
-+            )
-+        )
-+
-+        mock_tmp_store.assert_not_called()
-+        self.mock_runner.run.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_input_write_error(self, mock_tmp_store):
-+        mock_tmp_store.side_effect = LibraryError
-+
-+        self.assertRaises(
-+            LibraryError,
-+            lambda: lib.client_import_certificate_and_key(
-+                self.mock_runner,
-+                "pk12 certificate"
-+            )
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "pk12 certificate",
-+            reports.qdevice_certificate_import_error
-+        )
-+        self.mock_runner.run.assert_not_called()
-+
-+    @mock.patch(
-+        "pcs.lib.corosync.qdevice_net.client_initialized",
-+        lambda: True
-+    )
-+    def test_import_error(self, mock_tmp_store):
-+        mock_tmp_store.return_value = self.mock_tmpfile
-+        self.mock_runner.run.return_value = ("tool output error", 1)
-+
-+        assert_raise_library_error(
-+            lambda: lib.client_import_certificate_and_key(
-+                self.mock_runner,
-+                "pk12 certificate"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-+                {
-+                    "reason": "tool output error",
-+                }
-+            )
-+        )
-+
-+        mock_tmp_store.assert_called_once_with(
-+            "pk12 certificate",
-+            reports.qdevice_certificate_import_error
-+        )
-+        mock_tmp_store.assert_called_once_with(
-+            "pk12 certificate",
-+            reports.qdevice_certificate_import_error
-+        )
-+        self.mock_runner.run.assert_called_once_with([
-+            _client_cert_tool, "-m", "-c", self.mock_tmpfile.name
-+        ])
-+
-+
-+class RemoteQdeviceGetCaCertificate(TestCase):
-+    def test_success(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        expected_result = "abcd".encode("utf-8")
-+        mock_communicator.call_host.return_value = base64.b64encode(
-+            expected_result
-+        )
-+
-+        result = lib.remote_qdevice_get_ca_certificate(
-+            mock_communicator,
-+            "qdevice host"
-+        )
-+        self.assertEqual(result, expected_result)
-+
-+        mock_communicator.call_host.assert_called_once_with(
-+            "qdevice host",
-+            "remote/qdevice_net_get_ca_certificate",
-+            None
-+        )
-+
-+    def test_decode_error(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_host.return_value = "error"
-+
-+        assert_raise_library_error(
-+            lambda: lib.remote_qdevice_get_ca_certificate(
-+                mock_communicator,
-+                "qdevice host"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                {
-+                    "node": "qdevice host",
-+                }
-+            )
-+        )
-+
-+    def test_comunication_error(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_host.side_effect = NodeCommunicationException(
-+            "qdevice host", "command", "reason"
-+        )
-+
-+        self.assertRaises(
-+            NodeCommunicationException,
-+            lambda: lib.remote_qdevice_get_ca_certificate(
-+                mock_communicator,
-+                "qdevice host"
-+            )
-+        )
-+
-+
-+class RemoteClientSetupTest(TestCase):
-+    def test_success(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        node = "node address"
-+        ca_cert = "CA certificate".encode("utf-8")
-+
-+        lib.remote_client_setup(mock_communicator, node, ca_cert)
-+
-+        mock_communicator.call_node.assert_called_once_with(
-+            node,
-+            "remote/qdevice_net_client_init_certificate_storage",
-+            "ca_certificate={0}".format(
-+                cert_to_url(ca_cert)
-+            )
-+        )
-+
-+    def test_comunication_error(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_node.side_effect = NodeCommunicationException(
-+            "node address", "command", "reason"
-+        )
-+
-+        self.assertRaises(
-+            NodeCommunicationException,
-+            lambda: lib.remote_client_setup(
-+                mock_communicator,
-+                "node address",
-+                "ca cert".encode("utf-8")
-+            )
-+        )
-+
-+
-+class RemoteSignCertificateRequestTest(TestCase):
-+    def test_success(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        cert_request = "request".encode("utf-8")
-+        expected_result = "abcd".encode("utf-8")
-+        host = "qdevice host"
-+        cluster_name = "ClusterName"
-+        mock_communicator.call_host.return_value = base64.b64encode(
-+            expected_result
-+        )
-+
-+        result = lib.remote_sign_certificate_request(
-+            mock_communicator,
-+            host,
-+            cert_request,
-+            cluster_name
-+        )
-+        self.assertEqual(result, expected_result)
-+
-+        mock_communicator.call_host.assert_called_once_with(
-+            host,
-+            "remote/qdevice_net_sign_node_certificate",
-+            "certificate_request={0}&cluster_name={1}".format(
-+                cert_to_url(cert_request),
-+                cluster_name
-+            )
-+        )
-+
-+    def test_decode_error(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_host.return_value = "error"
-+
-+        assert_raise_library_error(
-+            lambda: lib.remote_sign_certificate_request(
-+                mock_communicator,
-+                "qdevice host",
-+                "cert request".encode("utf-8"),
-+                "cluster name"
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                {
-+                    "node": "qdevice host",
-+                }
-+            )
-+        )
-+
-+    def test_comunication_error(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_host.side_effect = NodeCommunicationException(
-+            "qdevice host", "command", "reason"
-+        )
-+
-+        self.assertRaises(
-+            NodeCommunicationException,
-+            lambda: lib.remote_sign_certificate_request(
-+                mock_communicator,
-+                "qdevice host",
-+                "cert request".encode("utf-8"),
-+                "cluster name"
-+            )
-+        )
-+
-+
-+class RemoteClientImportCertificateAndKeyTest(TestCase):
-+    def test_success(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        node = "node address"
-+        pk12_cert = "pk12 certificate".encode("utf-8")
-+
-+        lib.remote_client_import_certificate_and_key(
-+            mock_communicator,
-+            node,
-+            pk12_cert
-+        )
-+
-+        mock_communicator.call_node.assert_called_once_with(
-+            node,
-+            "remote/qdevice_net_client_import_certificate",
-+            "certificate={0}".format(
-+                cert_to_url(pk12_cert)
-+            )
-+        )
-+
-+    def test_comunication_error(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_node.side_effect = NodeCommunicationException(
-+            "node address", "command", "reason"
-+        )
-+
-+        self.assertRaises(
-+            NodeCommunicationException,
-+            lambda: lib.remote_client_import_certificate_and_key(
-+                mock_communicator,
-+                "node address",
-+                "pk12 cert".encode("utf-8")
-+            )
-+        )
-+
-+
-+class RemoteClientDestroy(TestCase):
-+    def test_success(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        node = "node address"
-+
-+        lib.remote_client_destroy(mock_communicator, node)
-+
-+        mock_communicator.call_node.assert_called_once_with(
-+            node,
-+            "remote/qdevice_net_client_destroy",
-+            None
-+        )
-+
-+    def test_comunication_error(self):
-+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        mock_communicator.call_node.side_effect = NodeCommunicationException(
-+            "node address", "command", "reason"
-+        )
-+
-+        self.assertRaises(
-+            NodeCommunicationException,
-+            lambda: lib.remote_client_destroy(mock_communicator, "node address")
-+        )
-+
-+
-+class GetOutputCertificateTest(TestCase):
-+    def setUp(self):
-+        self.file_path = get_test_resource("qdevice-certs/qnetd-cacert.crt")
-+        self.file_data = open(self.file_path, "rb").read()
-+
-+    def test_success(self):
-+        cert_tool_output = """
-+some line
-+Certificate stored in {0}
-+some other line
-+        """.format(self.file_path)
-+        report_func = mock.MagicMock()
-+
-+        self.assertEqual(
-+            self.file_data,
-+            lib._get_output_certificate(cert_tool_output, report_func)
-+        )
-+        report_func.assert_not_called()
-+
-+    def test_success_request(self):
-+        cert_tool_output = """
-+some line
-+Certificate request stored in {0}
-+some other line
-+        """.format(self.file_path)
-+        report_func = mock.MagicMock()
-+
-+        self.assertEqual(
-+            self.file_data,
-+            lib._get_output_certificate(cert_tool_output, report_func)
-+        )
-+        report_func.assert_not_called()
-+
-+    def test_message_not_found(self):
-+        cert_tool_output = "some rubbish output"
-+        report_func = reports.qdevice_certificate_import_error
-+
-+        assert_raise_library_error(
-+            lambda: lib._get_output_certificate(
-+                cert_tool_output,
-+                report_func
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-+                {
-+                    "reason": cert_tool_output,
-+                }
-+            )
-+        )
-+
-+    def test_cannot_read_file(self):
-+        cert_tool_output = """
-+some line
-+Certificate request stored in {0}.bad
-+some other line
-+        """.format(self.file_path)
-+        report_func = reports.qdevice_certificate_import_error
-+
-+        assert_raise_library_error(
-+            lambda: lib._get_output_certificate(
-+                cert_tool_output,
-+                report_func
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-+                {
-+                    "reason": "{0}.bad: No such file or directory".format(
-+                        self.file_path
-+                    ),
-+                }
-+            )
-+        )
-+
-diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py
-index 95f7a00..c6322b7 100644
---- a/pcs/test/test_lib_env.py
-+++ b/pcs/test/test_lib_env.py
-@@ -235,13 +235,24 @@ class LibraryEnvironmentTest(TestCase):
-             )]
-         )
- 
-+    @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
-     @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
-     @mock.patch("pcs.lib.env.reload_corosync_config")
-     @mock.patch("pcs.lib.env.distribute_corosync_conf")
-     @mock.patch("pcs.lib.env.get_local_corosync_conf")
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "node_communicator",
-+        lambda self: "mock node communicator"
-+    )
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "cmd_runner",
-+        lambda self: "mock cmd runner"
-+    )
-     def test_corosync_conf_set(
-         self, mock_get_corosync, mock_distribute, mock_reload,
--        mock_check_offline
-+        mock_check_offline, mock_qdevice_reload
-     ):
-         corosync_data = "totem {\n    version: 2\n}\n"
-         new_corosync_data = "totem {\n    version: 3\n}\n"
-@@ -266,8 +277,11 @@ class LibraryEnvironmentTest(TestCase):
-         self.assertEqual(0, mock_get_corosync.call_count)
-         mock_check_offline.assert_not_called()
-         mock_reload.assert_not_called()
-+        mock_qdevice_reload.assert_not_called()
- 
-+    @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
-     @mock.patch("pcs.lib.env.reload_corosync_config")
-+    @mock.patch("pcs.lib.env.is_service_running")
-     @mock.patch("pcs.lib.env.distribute_corosync_conf")
-     @mock.patch("pcs.lib.env.get_local_corosync_conf")
-     @mock.patch.object(
-@@ -285,12 +299,14 @@ class LibraryEnvironmentTest(TestCase):
-         "cmd_runner",
-         lambda self: "mock cmd runner"
-     )
--    def test_corosync_conf_not_set(
--        self, mock_get_corosync, mock_distribute, mock_reload
-+    def test_corosync_conf_not_set_online(
-+        self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
-+        mock_qdevice_reload
-     ):
-         corosync_data = open(rc("corosync.conf")).read()
-         new_corosync_data = corosync_data.replace("version: 2", "version: 3")
-         mock_get_corosync.return_value = corosync_data
-+        mock_is_running.return_value = True
-         env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- 
-         self.assertTrue(env.is_corosync_conf_live)
-@@ -309,10 +325,120 @@ class LibraryEnvironmentTest(TestCase):
-             new_corosync_data,
-             False
-         )
-+        mock_is_running.assert_called_once_with("mock cmd runner", "corosync")
-         mock_reload.assert_called_once_with("mock cmd runner")
-+        mock_qdevice_reload.assert_not_called()
- 
-+    @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
-+    @mock.patch("pcs.lib.env.reload_corosync_config")
-+    @mock.patch("pcs.lib.env.is_service_running")
-+    @mock.patch("pcs.lib.env.distribute_corosync_conf")
-+    @mock.patch("pcs.lib.env.get_local_corosync_conf")
-+    @mock.patch.object(
-+        CorosyncConfigFacade,
-+        "get_nodes",
-+        lambda self: "mock node list"
-+    )
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "node_communicator",
-+        lambda self: "mock node communicator"
-+    )
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "cmd_runner",
-+        lambda self: "mock cmd runner"
-+    )
-+    def test_corosync_conf_not_set_offline(
-+        self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
-+        mock_qdevice_reload
-+    ):
-+        corosync_data = open(rc("corosync.conf")).read()
-+        new_corosync_data = corosync_data.replace("version: 2", "version: 3")
-+        mock_get_corosync.return_value = corosync_data
-+        mock_is_running.return_value = False
-+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+
-+        self.assertTrue(env.is_corosync_conf_live)
-+
-+        self.assertEqual(corosync_data, env.get_corosync_conf_data())
-+        self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
-+        self.assertEqual(2, mock_get_corosync.call_count)
-+
-+        env.push_corosync_conf(
-+            CorosyncConfigFacade.from_string(new_corosync_data)
-+        )
-+        mock_distribute.assert_called_once_with(
-+            "mock node communicator",
-+            self.mock_reporter,
-+            "mock node list",
-+            new_corosync_data,
-+            False
-+        )
-+        mock_is_running.assert_called_once_with("mock cmd runner", "corosync")
-+        mock_reload.assert_not_called()
-+        mock_qdevice_reload.assert_not_called()
-+
-+    @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
-+    @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
-+    @mock.patch("pcs.lib.env.reload_corosync_config")
-+    @mock.patch("pcs.lib.env.is_service_running")
-+    @mock.patch("pcs.lib.env.distribute_corosync_conf")
-+    @mock.patch("pcs.lib.env.get_local_corosync_conf")
-+    @mock.patch.object(
-+        CorosyncConfigFacade,
-+        "get_nodes",
-+        lambda self: "mock node list"
-+    )
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "node_communicator",
-+        lambda self: "mock node communicator"
-+    )
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "cmd_runner",
-+        lambda self: "mock cmd runner"
-+    )
-+    def test_corosync_conf_not_set_need_qdevice_reload_success(
-+        self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
-+        mock_check_offline, mock_qdevice_reload
-+    ):
-+        corosync_data = open(rc("corosync.conf")).read()
-+        new_corosync_data = corosync_data.replace("version: 2", "version: 3")
-+        mock_get_corosync.return_value = corosync_data
-+        mock_is_running.return_value = True
-+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+
-+        self.assertTrue(env.is_corosync_conf_live)
-+
-+        self.assertEqual(corosync_data, env.get_corosync_conf_data())
-+        self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
-+        self.assertEqual(2, mock_get_corosync.call_count)
-+
-+        conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
-+        conf_facade._need_qdevice_reload = True
-+        env.push_corosync_conf(conf_facade)
-+        mock_check_offline.assert_not_called()
-+        mock_distribute.assert_called_once_with(
-+            "mock node communicator",
-+            self.mock_reporter,
-+            "mock node list",
-+            new_corosync_data,
-+            False
-+        )
-+        mock_reload.assert_called_once_with("mock cmd runner")
-+        mock_qdevice_reload.assert_called_once_with(
-+            "mock node communicator",
-+            self.mock_reporter,
-+            "mock node list",
-+            False
-+        )
-+
-+    @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
-     @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
-     @mock.patch("pcs.lib.env.reload_corosync_config")
-+    @mock.patch("pcs.lib.env.is_service_running")
-     @mock.patch("pcs.lib.env.distribute_corosync_conf")
-     @mock.patch("pcs.lib.env.get_local_corosync_conf")
-     @mock.patch.object(
-@@ -326,12 +452,13 @@ class LibraryEnvironmentTest(TestCase):
-         lambda self: "mock node communicator"
-     )
-     def test_corosync_conf_not_set_need_offline_success(
--        self, mock_get_corosync, mock_distribute, mock_reload,
--        mock_check_offline
-+        self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload,
-+        mock_check_offline, mock_qdevice_reload
-     ):
-         corosync_data = open(rc("corosync.conf")).read()
-         new_corosync_data = corosync_data.replace("version: 2", "version: 3")
-         mock_get_corosync.return_value = corosync_data
-+        mock_is_running.return_value = False
-         env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- 
-         self.assertTrue(env.is_corosync_conf_live)
-@@ -357,7 +484,9 @@ class LibraryEnvironmentTest(TestCase):
-             False
-         )
-         mock_reload.assert_not_called()
-+        mock_qdevice_reload.assert_not_called()
- 
-+    @mock.patch("pcs.lib.env.qdevice_reload_on_nodes")
-     @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
-     @mock.patch("pcs.lib.env.reload_corosync_config")
-     @mock.patch("pcs.lib.env.distribute_corosync_conf")
-@@ -374,7 +503,7 @@ class LibraryEnvironmentTest(TestCase):
-     )
-     def test_corosync_conf_not_set_need_offline_fail(
-         self, mock_get_corosync, mock_distribute, mock_reload,
--        mock_check_offline
-+        mock_check_offline, mock_qdevice_reload
-     ):
-         corosync_data = open(rc("corosync.conf")).read()
-         new_corosync_data = corosync_data.replace("version: 2", "version: 3")
-@@ -410,6 +539,7 @@ class LibraryEnvironmentTest(TestCase):
-         )
-         mock_distribute.assert_not_called()
-         mock_reload.assert_not_called()
-+        mock_qdevice_reload.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.CommandRunner")
-     def test_cmd_runner_no_options(self, mock_runner):
-diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
-index c08b059..929a50d 100644
---- a/pcs/test/test_lib_external.py
-+++ b/pcs/test/test_lib_external.py
-@@ -31,7 +31,11 @@ from pcs.test.tools.pcs_mock import mock
- 
- from pcs import settings
- from pcs.common import report_codes
--from pcs.lib.errors import ReportItemSeverity as severity
-+from pcs.lib import reports
-+from pcs.lib.errors import (
-+    LibraryError,
-+    ReportItemSeverity as severity
-+)
- 
- import pcs.lib.external as lib
- 
-@@ -830,6 +834,126 @@ class NodeCommunicatorExceptionTransformTest(TestCase):
-         self.assertTrue(raised)
- 
- 
-+class ParallelCommunicationHelperTest(TestCase):
-+    def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-+
-+    def fixture_raiser(self):
-+        def raiser(x, *args, **kwargs):
-+            if x == 1:
-+                raise lib.NodeConnectionException("node", "command", "reason")
-+            elif x == 2:
-+                raise LibraryError(
-+                    reports.corosync_config_distribution_node_error("node")
-+                )
-+        return raiser
-+
-+    def test_success(self):
-+        func = mock.MagicMock()
-+        lib.parallel_nodes_communication_helper(
-+            func,
-+            [([x], {"a": x*2,}) for x in range(3)],
-+            self.mock_reporter,
-+            skip_offline_nodes=False
-+        )
-+        expected_calls = [
-+            mock.call(0, a=0),
-+            mock.call(1, a=2),
-+            mock.call(2, a=4),
-+        ]
-+        self.assertEqual(len(expected_calls), len(func.mock_calls))
-+        func.assert_has_calls(expected_calls)
-+        self.assertEqual(self.mock_reporter.report_item_list, [])
-+
-+    def test_errors(self):
-+        func = self.fixture_raiser()
-+        assert_raise_library_error(
-+            lambda: lib.parallel_nodes_communication_helper(
-+                func,
-+                [([x], {"a": x*2,}) for x in range(4)],
-+                self.mock_reporter,
-+                skip_offline_nodes=False
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                {
-+                    "node": "node",
-+                    "reason": "reason",
-+                    "command": "command",
-+                },
-+                report_codes.SKIP_OFFLINE_NODES
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
-+                {
-+                    "node": "node",
-+                }
-+            )
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.ERROR,
-+                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    {
-+                        "node": "node",
-+                        "reason": "reason",
-+                        "command": "command",
-+                    },
-+                    report_codes.SKIP_OFFLINE_NODES
-+                ),
-+                (
-+                    severity.ERROR,
-+                    report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
-+                    {
-+                        "node": "node",
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_errors_skip_offline(self):
-+        func = self.fixture_raiser()
-+        assert_raise_library_error(
-+            lambda: lib.parallel_nodes_communication_helper(
-+                func,
-+                [([x], {"a": x*2,}) for x in range(4)],
-+                self.mock_reporter,
-+                skip_offline_nodes=True
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
-+                {
-+                    "node": "node",
-+                }
-+            )
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.WARNING,
-+                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                    {
-+                        "node": "node",
-+                        "reason": "reason",
-+                        "command": "command",
-+                    }
-+                ),
-+                (
-+                    severity.ERROR,
-+                    report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
-+                    {
-+                        "node": "node",
-+                    }
-+                )
-+            ]
-+        )
-+
- class IsCmanClusterTest(TestCase):
-     def template_test(self, is_cman, corosync_output, corosync_retval=0):
-         mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
-diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py
-index 6af47d7..cff88eb 100644
---- a/pcs/test/test_lib_nodes_task.py
-+++ b/pcs/test/test_lib_nodes_task.py
-@@ -27,14 +27,6 @@ class DistributeCorosyncConfTest(TestCase):
-         self.mock_reporter = MockLibraryReportProcessor()
-         self.mock_communicator = "mock node communicator"
- 
--    def assert_set_remote_corosync_conf_call(self, a_call, node_ring0, config):
--        self.assertEqual("set_remote_corosync_conf", a_call[0])
--        self.assertEqual(3, len(a_call[1]))
--        self.assertEqual(self.mock_communicator, a_call[1][0])
--        self.assertEqual(node_ring0, a_call[1][1].ring0)
--        self.assertEqual(config, a_call[1][2])
--        self.assertEqual(0, len(a_call[2]))
--
-     @mock.patch("pcs.lib.nodes_task.corosync_live")
-     def test_success(self, mock_corosync_live):
-         conf_text = "test conf text"
-@@ -53,21 +45,19 @@ class DistributeCorosyncConfTest(TestCase):
- 
-         corosync_live_calls = [
-             mock.call.set_remote_corosync_conf(
--                "mock node communicator", nodes[0], conf_text
-+                "mock node communicator", node_addrs_list[0], conf_text
-             ),
-             mock.call.set_remote_corosync_conf(
--                "mock node communicator", nodes[1], conf_text
-+                "mock node communicator", node_addrs_list[1], conf_text
-             ),
-         ]
-         self.assertEqual(
-             len(corosync_live_calls),
-             len(mock_corosync_live.mock_calls)
-         )
--        self.assert_set_remote_corosync_conf_call(
--            mock_corosync_live.mock_calls[0], nodes[0], conf_text
--        )
--        self.assert_set_remote_corosync_conf_call(
--            mock_corosync_live.mock_calls[1], nodes[1], conf_text
-+        mock_corosync_live.set_remote_corosync_conf.assert_has_calls(
-+            corosync_live_calls,
-+            any_order=True
-         )
- 
-         assert_report_item_list_equal(
-@@ -145,12 +135,10 @@ class DistributeCorosyncConfTest(TestCase):
-             len(corosync_live_calls),
-             len(mock_corosync_live.mock_calls)
-         )
--        self.assert_set_remote_corosync_conf_call(
--            mock_corosync_live.mock_calls[0], nodes[0], conf_text
--        )
--        self.assert_set_remote_corosync_conf_call(
--            mock_corosync_live.mock_calls[1], nodes[1], conf_text
--        )
-+        mock_corosync_live.set_remote_corosync_conf.assert_has_calls([
-+            mock.call("mock node communicator", node_addrs_list[0], conf_text),
-+            mock.call("mock node communicator", node_addrs_list[1], conf_text),
-+        ], any_order=True)
- 
-         assert_report_item_list_equal(
-             self.mock_reporter.report_item_list,
-@@ -221,12 +209,10 @@ class DistributeCorosyncConfTest(TestCase):
-             len(corosync_live_calls),
-             len(mock_corosync_live.mock_calls)
-         )
--        self.assert_set_remote_corosync_conf_call(
--            mock_corosync_live.mock_calls[0], nodes[0], conf_text
--        )
--        self.assert_set_remote_corosync_conf_call(
--            mock_corosync_live.mock_calls[1], nodes[1], conf_text
--        )
-+        mock_corosync_live.set_remote_corosync_conf.assert_has_calls([
-+            mock.call("mock node communicator", node_addrs_list[0], conf_text),
-+            mock.call("mock node communicator", node_addrs_list[1], conf_text),
-+        ], any_order=True)
- 
-         assert_report_item_list_equal(
-             self.mock_reporter.report_item_list,
-@@ -452,6 +438,134 @@ class CheckCorosyncOfflineTest(TestCase):
-         )
- 
- 
-+@mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_stop")
-+@mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_start")
-+class QdeviceReloadOnNodesTest(TestCase):
-+    def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+
-+    def test_success(self, mock_remote_start, mock_remote_stop):
-+        nodes = ["node1", "node2"]
-+        node_addrs_list = NodeAddressesList(
-+            [NodeAddresses(addr) for addr in nodes]
-+        )
-+
-+        lib.qdevice_reload_on_nodes(
-+            self.mock_communicator,
-+            self.mock_reporter,
-+            node_addrs_list
-+        )
-+
-+        node_calls = [
-+            mock.call(
-+                self.mock_reporter, self.mock_communicator, node_addrs_list[0]
-+            ),
-+            mock.call(
-+                self.mock_reporter, self.mock_communicator, node_addrs_list[1]
-+            ),
-+        ]
-+        self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls))
-+        self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls))
-+        mock_remote_stop.assert_has_calls(node_calls, any_order=True)
-+        mock_remote_start.assert_has_calls(node_calls, any_order=True)
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
-+                    {}
-+                ),
-+            ]
-+        )
-+
-+    def test_fail_doesnt_prevent_start(
-+        self, mock_remote_start, mock_remote_stop
-+    ):
-+        nodes = ["node1", "node2"]
-+        node_addrs_list = NodeAddressesList(
-+            [NodeAddresses(addr) for addr in nodes]
-+        )
-+        def raiser(reporter, communicator, node):
-+            if node.ring0 == nodes[1]:
-+                raise NodeAuthenticationException(
-+                    node.label, "command", "HTTP error: 401"
-+                )
-+        mock_remote_stop.side_effect = raiser
-+
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_reload_on_nodes(
-+                self.mock_communicator,
-+                self.mock_reporter,
-+                node_addrs_list
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
-+                {
-+                    "node": nodes[1],
-+                    "command": "command",
-+                    "reason" : "HTTP error: 401",
-+                },
-+                report_codes.SKIP_OFFLINE_NODES
-+            )
-+        )
-+
-+        node_calls = [
-+            mock.call(
-+                self.mock_reporter, self.mock_communicator, node_addrs_list[0]
-+            ),
-+            mock.call(
-+                self.mock_reporter, self.mock_communicator, node_addrs_list[1]
-+            ),
-+        ]
-+        self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls))
-+        self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls))
-+        mock_remote_stop.assert_has_calls(node_calls, any_order=True)
-+        mock_remote_start.assert_has_calls(node_calls, any_order=True)
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
-+                    {}
-+                ),
-+                # why the same error twice?
-+                # 1. Tested piece of code calls a function which puts an error
-+                # into the reporter. The reporter raises an exception. The
-+                # exception is caught in the tested piece of code, stored, and
-+                # later put to reporter again.
-+                # 2. Mock reporter remembers everything that goes through it
-+                # and by the machanism described in 1 the error goes througt it
-+                # twice.
-+                (
-+                    severity.ERROR,
-+                    report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
-+                    {
-+                        "node": nodes[1],
-+                        "command": "command",
-+                        "reason" : "HTTP error: 401",
-+                    },
-+                    report_codes.SKIP_OFFLINE_NODES
-+                ),
-+                (
-+                    severity.ERROR,
-+                    report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
-+                    {
-+                        "node": nodes[1],
-+                        "command": "command",
-+                        "reason" : "HTTP error: 401",
-+                    },
-+                    report_codes.SKIP_OFFLINE_NODES
-+                ),
-+            ]
-+        )
-+
-+
- class NodeCheckAuthTest(TestCase):
-     def test_success(self):
-         mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
-index 8167ad9..86de4c6 100644
---- a/pcs/test/test_quorum.py
-+++ b/pcs/test/test_quorum.py
-@@ -144,7 +144,7 @@ class DeviceAddTest(TestBase):
- 
-     def test_success_model_only(self):
-         self.assert_pcs_success(
--            "quorum device add model net host=127.0.0.1 algorithm=ffsplit"
-+            "quorum device add model net host=127.0.0.1 algorithm=lms"
-         )
-         self.assert_pcs_success(
-             "quorum config",
-@@ -152,7 +152,7 @@ class DeviceAddTest(TestBase):
- Options:
- Device:
-   Model: net
--    algorithm: ffsplit
-+    algorithm: lms
-     host: 127.0.0.1
- """
-         )
-@@ -167,6 +167,7 @@ Device:
- Options:
- Device:
-   timeout: 12345
-+  votes: 1
-   Model: net
-     algorithm: ffsplit
-     host: 127.0.0.1
-@@ -193,7 +194,7 @@ Error: required option 'host' is missing
-         self.assert_pcs_fail(
-             "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d",
-             """\
--Error: 'x' is not a valid algorithm value, use 2nodelms, ffsplit, lms, use --force to override
-+Error: 'x' is not a valid algorithm value, use ffsplit, lms, use --force to override
- Error: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker, use --force to override
- Error: invalid quorum device option 'a', allowed options are: sync_timeout, timeout, use --force to override
- Error: '-1' is not a valid timeout value, use positive integer, use --force to override
-@@ -203,7 +204,7 @@ Error: '-1' is not a valid timeout value, use positive integer, use --force to o
-         self.assert_pcs_success(
-             "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d --force",
-             """\
--Warning: 'x' is not a valid algorithm value, use 2nodelms, ffsplit, lms
-+Warning: 'x' is not a valid algorithm value, use ffsplit, lms
- Warning: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker
- Warning: invalid quorum device option 'a', allowed options are: sync_timeout, timeout
- Warning: '-1' is not a valid timeout value, use positive integer
-diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
-index c61a2b8..819f8ee 100644
---- a/pcs/test/test_utils.py
-+++ b/pcs/test/test_utils.py
-@@ -967,1359 +967,1607 @@ class UtilsTest(unittest.TestCase):
-             }
-         )
- 
--    def test_parse_cman_quorum_info(self):
--        parsed = utils.parse_cman_quorum_info("""\
--Version: 6.2.0
--Config Version: 23
--Cluster Name: cluster66
--Cluster Id: 22265
--Cluster Member: Yes
--Cluster Generation: 3612
--Membership state: Cluster-Member
--Nodes: 3
--Expected votes: 3
--Total votes: 3
--Node votes: 1
--Quorum: 2 
--Active subsystems: 8
--Flags: 
--Ports Bound: 0 
--Node name: rh66-node2
--Node ID: 2
--Multicast addresses: 239.192.86.80
--Node addresses: 192.168.122.61
-----Votes---
--1 M 3 rh66-node1
--2 M 2 rh66-node2
--3 M 1 rh66-node3
--""")
--        self.assertEqual(True, parsed["quorate"])
--        self.assertEqual(2, parsed["quorum"])
-+    def test_get_operations_from_transitions(self):
-+        transitions = utils.parse(rc("transitions01.xml"))
-         self.assertEqual(
-             [
--                {"name": "rh66-node1", "votes": 3, "local": False},
--                {"name": "rh66-node2", "votes": 2, "local": True},
--                {"name": "rh66-node3", "votes": 1, "local": False},
-+                {
-+                    'id': 'dummy',
-+                    'long_id': 'dummy',
-+                    'operation': 'stop',
-+                    'on_node': 'rh7-3',
-+                },
-+                {
-+                    'id': 'dummy',
-+                    'long_id': 'dummy',
-+                    'operation': 'start',
-+                    'on_node': 'rh7-2',
-+                },
-+                {
-+                    'id': 'd0',
-+                    'long_id': 'd0:1',
-+                    'operation': 'stop',
-+                    'on_node': 'rh7-1',
-+                },
-+                {
-+                    'id': 'd0',
-+                    'long_id': 'd0:1',
-+                    'operation': 'start',
-+                    'on_node': 'rh7-2',
-+                },
-+                {
-+                    'id': 'state',
-+                    'long_id': 'state:0',
-+                    'operation': 'stop',
-+                    'on_node': 'rh7-3',
-+                },
-+                {
-+                    'id': 'state',
-+                    'long_id': 'state:0',
-+                    'operation': 'start',
-+                    'on_node': 'rh7-2',
-+                },
-             ],
--            parsed["node_list"]
-+            utils.get_operations_from_transitions(transitions)
-         )
- 
--        parsed = utils.parse_cman_quorum_info("""\
--Version: 6.2.0
--Config Version: 23
--Cluster Name: cluster66
--Cluster Id: 22265
--Cluster Member: Yes
--Cluster Generation: 3612
--Membership state: Cluster-Member
--Nodes: 3
--Expected votes: 3
--Total votes: 3
--Node votes: 1
--Quorum: 2 Activity blocked
--Active subsystems: 8
--Flags: 
--Ports Bound: 0 
--Node name: rh66-node1
--Node ID: 1
--Multicast addresses: 239.192.86.80
--Node addresses: 192.168.122.61
-----Votes---
--1 M 3 rh66-node1
--2 X 2 rh66-node2
--3 X 1 rh66-node3
--""")
--        self.assertEqual(False, parsed["quorate"])
--        self.assertEqual(2, parsed["quorum"])
-+        transitions = utils.parse(rc("transitions02.xml"))
-         self.assertEqual(
-             [
--                {"name": "rh66-node1", "votes": 3, "local": True},
-+                {
-+                    "id": "RemoteNode",
-+                    "long_id": "RemoteNode",
-+                    "operation": "stop",
-+                    "on_node": "virt-143",
-+                },
-+                {
-+                    "id": "RemoteNode",
-+                    "long_id": "RemoteNode",
-+                    "operation": "migrate_to",
-+                    "on_node": "virt-143",
-+                },
-+                {
-+                    "id": "RemoteNode",
-+                    "long_id": "RemoteNode",
-+                    "operation": "migrate_from",
-+                    "on_node": "virt-142",
-+                },
-+                {
-+                    "id": "dummy8",
-+                    "long_id": "dummy8",
-+                    "operation": "stop",
-+                    "on_node": "virt-143",
-+                },
-+                {
-+                    "id": "dummy8",
-+                    "long_id": "dummy8",
-+                    "operation": "start",
-+                    "on_node": "virt-142",
-+                }
-             ],
--            parsed["node_list"]
-+            utils.get_operations_from_transitions(transitions)
-         )
- 
--        parsed = utils.parse_cman_quorum_info("")
--        self.assertEqual(None, parsed)
--
--        parsed = utils.parse_cman_quorum_info("""\
--Version: 6.2.0
--Config Version: 23
--Cluster Name: cluster66
--Cluster Id: 22265
--Cluster Member: Yes
--Cluster Generation: 3612
--Membership state: Cluster-Member
--Nodes: 3
--Expected votes: 3
--Total votes: 3
--Node votes: 1
--Quorum: 
--Active subsystems: 8
--Flags: 
--Ports Bound: 0 
--Node name: rh66-node2
--Node ID: 2
--Multicast addresses: 239.192.86.80
--Node addresses: 192.168.122.61
-----Votes---
--1 M 3 rh66-node1
--2 M 2 rh66-node2
--3 M 1 rh66-node3
--""")
--        self.assertEqual(None, parsed)
--
--        parsed = utils.parse_cman_quorum_info("""\
--Version: 6.2.0
--Config Version: 23
--Cluster Name: cluster66
--Cluster Id: 22265
--Cluster Member: Yes
--Cluster Generation: 3612
--Membership state: Cluster-Member
--Nodes: 3
--Expected votes: 3
--Total votes: 3
--Node votes: 1
--Quorum: Foo
--Active subsystems: 8
--Flags: 
--Ports Bound: 0 
--Node name: rh66-node2
--Node ID: 2
--Multicast addresses: 239.192.86.80
--Node addresses: 192.168.122.61
-----Votes---
--1 M 3 rh66-node1
--2 M 2 rh66-node2
--3 M 1 rh66-node3
--""")
--        self.assertEqual(None, parsed)
--
--        parsed = utils.parse_cman_quorum_info("""\
--Version: 6.2.0
--Config Version: 23
--Cluster Name: cluster66
--Cluster Id: 22265
--Cluster Member: Yes
--Cluster Generation: 3612
--Membership state: Cluster-Member
--Nodes: 3
--Expected votes: 3
--Total votes: 3
--Node votes: 1
--Quorum: 4
--Active subsystems: 8
--Flags: 
--Ports Bound: 0 
--Node name: rh66-node2
--Node ID: 2
--Multicast addresses: 239.192.86.80
--Node addresses: 192.168.122.61
-----Votes---
--1 M 3 rh66-node1
--2 M Foo rh66-node2
--3 M 1 rh66-node3
--""")
--        self.assertEqual(None, parsed)
--
--    def test_parse_quorumtool_output(self):
--        parsed = utils.parse_quorumtool_output("""\
--Quorum information
--------------------
--Date:             Fri Jan 16 13:03:28 2015
--Quorum provider:  corosync_votequorum
--Nodes:            3
--Node ID:          1
--Ring ID:          19860
--Quorate:          Yes
--
--Votequorum information
------------------------
--Expected votes:   3
--Highest expected: 3
--Total votes:      3
--Quorum:           2
--Flags:            Quorate
-+    def test_get_resources_location_from_operations(self):
-+        cib_dom = self.get_cib_resources()
- 
--Membership information
------------------------
--    Nodeid      Votes    Qdevice Name
--         1          3         NR rh70-node1
--         2          2         NR rh70-node2 (local)
--         3          1         NR rh70-node3
--""")
--        self.assertEqual(True, parsed["quorate"])
--        self.assertEqual(2, parsed["quorum"])
-+        operations = []
-         self.assertEqual(
--            [
--                {"name": "rh70-node1", "votes": 3, "local": False},
--                {"name": "rh70-node2", "votes": 2, "local": True},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--            parsed["node_list"]
-+            {},
-+            utils.get_resources_location_from_operations(cib_dom, operations)
-         )
- 
--        parsed = utils.parse_quorumtool_output("""\
--Quorum information
--------------------
--Date:             Fri Jan 16 13:03:35 2015
--Quorum provider:  corosync_votequorum
--Nodes:            1
--Node ID:          1
--Ring ID:          19868
--Quorate:          No
--
--Votequorum information
------------------------
--Expected votes:   3
--Highest expected: 3
--Total votes:      1
--Quorum:           2 Activity blocked
--Flags:            
-+        operations = [
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "start",
-+                "on_node": "rh7-1",
-+            },
-+        ]
-+        self.assertEqual(
-+            {
-+                'myResource': {
-+                    'id': 'myResource',
-+                    'id_for_constraint': 'myResource',
-+                    'long_id': 'myResource',
-+                    'start_on_node': 'rh7-1',
-+                 },
-+            },
-+            utils.get_resources_location_from_operations(cib_dom, operations)
-+        )
- 
--Membership information
------------------------
--    Nodeid      Votes    Qdevice Name
--             1          1         NR rh70-node1 (local)
--""")
--        self.assertEqual(False, parsed["quorate"])
--        self.assertEqual(2, parsed["quorum"])
-+        operations = [
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "start",
-+                "on_node": "rh7-1",
-+            },
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "start",
-+                "on_node": "rh7-2",
-+            },
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "monitor",
-+                "on_node": "rh7-3",
-+            },
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "stop",
-+                "on_node": "rh7-3",
-+            },
-+        ]
-         self.assertEqual(
--            [
--                {"name": "rh70-node1", "votes": 1, "local": True},
--            ],
--            parsed["node_list"]
-+            {
-+                'myResource': {
-+                    'id': 'myResource',
-+                    'id_for_constraint': 'myResource',
-+                    'long_id': 'myResource',
-+                    'start_on_node': 'rh7-2',
-+                 },
-+            },
-+            utils.get_resources_location_from_operations(cib_dom, operations)
-         )
- 
--        parsed = utils.parse_quorumtool_output("")
--        self.assertEqual(None, parsed)
-+        operations = [
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "start",
-+                "on_node": "rh7-1",
-+            },
-+            {
-+                "id": "myClonedResource",
-+                "long_id": "myClonedResource:0",
-+                "operation": "start",
-+                "on_node": "rh7-1",
-+            },
-+            {
-+                "id": "myClonedResource",
-+                "long_id": "myClonedResource:0",
-+                "operation": "start",
-+                "on_node": "rh7-2",
-+            },
-+            {
-+                "id": "myClonedResource",
-+                "long_id": "myClonedResource:1",
-+                "operation": "start",
-+                "on_node": "rh7-3",
-+            },
-+        ]
-+        self.assertEqual(
-+            {
-+                'myResource': {
-+                    'id': 'myResource',
-+                    'id_for_constraint': 'myResource',
-+                    'long_id': 'myResource',
-+                    'start_on_node': 'rh7-1',
-+                 },
-+                'myClonedResource:0': {
-+                    'id': 'myClonedResource',
-+                    'id_for_constraint': 'myClone',
-+                    'long_id': 'myClonedResource:0',
-+                    'start_on_node': 'rh7-2',
-+                 },
-+                'myClonedResource:1': {
-+                    'id': 'myClonedResource',
-+                    'id_for_constraint': 'myClone',
-+                    'long_id': 'myClonedResource:1',
-+                    'start_on_node': 'rh7-3',
-+                 },
-+            },
-+            utils.get_resources_location_from_operations(cib_dom, operations)
-+        )
- 
--        parsed = utils.parse_quorumtool_output("""\
--Quorum information
--------------------
--Date:             Fri Jan 16 13:03:28 2015
--Quorum provider:  corosync_votequorum
--Nodes:            3
--Node ID:          1
--Ring ID:          19860
--Quorate:          Yes
-+        operations = [
-+            {
-+                "id": "myUniqueClonedResource:0",
-+                "long_id": "myUniqueClonedResource:0",
-+                "operation": "start",
-+                "on_node": "rh7-1",
-+            },
-+            {
-+                "id": "myUniqueClonedResource:1",
-+                "long_id": "myUniqueClonedResource:1",
-+                "operation": "monitor",
-+                "on_node": "rh7-2",
-+            },
-+            {
-+                "id": "myUniqueClonedResource:2",
-+                "long_id": "myUniqueClonedResource:2",
-+                "operation": "start",
-+                "on_node": "rh7-3",
-+            },
-+        ]
-+        self.assertEqual(
-+            {
-+                'myUniqueClonedResource:0': {
-+                    'id': 'myUniqueClonedResource:0',
-+                    'id_for_constraint': 'myUniqueClone',
-+                    'long_id': 'myUniqueClonedResource:0',
-+                    'start_on_node': 'rh7-1',
-+                 },
-+                'myUniqueClonedResource:2': {
-+                    'id': 'myUniqueClonedResource:2',
-+                    'id_for_constraint': 'myUniqueClone',
-+                    'long_id': 'myUniqueClonedResource:2',
-+                    'start_on_node': 'rh7-3',
-+                 },
-+            },
-+            utils.get_resources_location_from_operations(cib_dom, operations)
-+        )
- 
--Votequorum information
------------------------
--Expected votes:   3
--Highest expected: 3
--Total votes:      3
--Quorum:           
--Flags:            Quorate
-+        operations = [
-+            {
-+                "id": "myMasteredGroupedResource",
-+                "long_id": "myMasteredGroupedResource:0",
-+                "operation": "start",
-+                "on_node": "rh7-1",
-+            },
-+            {
-+                "id": "myMasteredGroupedResource",
-+                "long_id": "myMasteredGroupedResource:1",
-+                "operation": "demote",
-+                "on_node": "rh7-2",
-+            },
-+            {
-+                "id": "myMasteredGroupedResource",
-+                "long_id": "myMasteredGroupedResource:1",
-+                "operation": "promote",
-+                "on_node": "rh7-3",
-+            },
-+        ]
-+        self.assertEqual(
-+            {
-+                'myMasteredGroupedResource:0': {
-+                    'id': 'myMasteredGroupedResource',
-+                    'id_for_constraint': 'myGroupMaster',
-+                    'long_id': 'myMasteredGroupedResource:0',
-+                    'start_on_node': 'rh7-1',
-+                 },
-+                'myMasteredGroupedResource:1': {
-+                    'id': 'myMasteredGroupedResource',
-+                    'id_for_constraint': 'myGroupMaster',
-+                    'long_id': 'myMasteredGroupedResource:1',
-+                    'promote_on_node': 'rh7-3',
-+                 },
-+            },
-+            utils.get_resources_location_from_operations(cib_dom, operations)
-+        )
- 
--Membership information
------------------------
--    Nodeid      Votes    Qdevice Name
--         1          1         NR rh70-node1 (local)
--         2          1         NR rh70-node2
--         3          1         NR rh70-node3
--""")
--        self.assertEqual(None, parsed)
-+        operations = [
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "stop",
-+                "on_node": "rh7-1",
-+            },
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "migrate_to",
-+                "on_node": "rh7-1",
-+            },
-+            {
-+                "id": "myResource",
-+                "long_id": "myResource",
-+                "operation": "migrate_from",
-+                "on_node": "rh7-2",
-+            },
-+        ]
-+        self.assertEqual(
-+            {
-+                "myResource": {
-+                    "id": "myResource",
-+                    "id_for_constraint": "myResource",
-+                    "long_id": "myResource",
-+                    "start_on_node": "rh7-2",
-+                },
-+            },
-+            utils.get_resources_location_from_operations(cib_dom, operations)
-+        )
- 
--        parsed = utils.parse_quorumtool_output("""\
--Quorum information
--------------------
--Date:             Fri Jan 16 13:03:28 2015
--Quorum provider:  corosync_votequorum
--Nodes:            3
--Node ID:          1
--Ring ID:          19860
--Quorate:          Yes
-+    def test_is_int(self):
-+        self.assertTrue(utils.is_int("-999"))
-+        self.assertTrue(utils.is_int("-1"))
-+        self.assertTrue(utils.is_int("0"))
-+        self.assertTrue(utils.is_int("1"))
-+        self.assertTrue(utils.is_int("99999"))
-+        self.assertTrue(utils.is_int(" 99999  "))
-+        self.assertFalse(utils.is_int("0.0"))
-+        self.assertFalse(utils.is_int("-1.0"))
-+        self.assertFalse(utils.is_int("-0.1"))
-+        self.assertFalse(utils.is_int("0.001"))
-+        self.assertFalse(utils.is_int("-999999.1"))
-+        self.assertFalse(utils.is_int("0.0001"))
-+        self.assertFalse(utils.is_int(""))
-+        self.assertFalse(utils.is_int("   "))
-+        self.assertFalse(utils.is_int("A"))
-+        self.assertFalse(utils.is_int("random 15 47 text  "))
- 
--Votequorum information
------------------------
--Expected votes:   3
--Highest expected: 3
--Total votes:      3
--Quorum:           Foo
--Flags:            Quorate
-+    def test_dom_get_node(self):
-+        cib = self.get_cib_with_nodes_minidom()
-+        #assertIsNone is not supported in python 2.6
-+        self.assertTrue(utils.dom_get_node(cib, "non-existing-node") is None)
-+        node = utils.dom_get_node(cib, "rh7-1")
-+        self.assertEqual(node.getAttribute("uname"), "rh7-1")
-+        self.assertEqual(node.getAttribute("id"), "1")
- 
--Membership information
------------------------
--    Nodeid      Votes    Qdevice Name
--         1          1         NR rh70-node1 (local)
--         2          1         NR rh70-node2
--         3          1         NR rh70-node3
--""")
--        self.assertEqual(None, parsed)
-+    def test_dom_prepare_child_element(self):
-+        cib = self.get_cib_with_nodes_minidom()
-+        node = cib.getElementsByTagName("node")[0]
-+        self.assertEqual(len(dom_get_child_elements(node)), 0)
-+        child = utils.dom_prepare_child_element(
-+            node, "utilization", "rh7-1-utilization"
-+        )
-+        self.assertEqual(len(dom_get_child_elements(node)), 1)
-+        self.assertEqual(child, dom_get_child_elements(node)[0])
-+        self.assertEqual(dom_get_child_elements(node)[0].tagName, "utilization")
-+        self.assertEqual(
-+            dom_get_child_elements(node)[0].getAttribute("id"),
-+            "rh7-1-utilization"
-+        )
-+        child2 = utils.dom_prepare_child_element(
-+            node, "utilization", "rh7-1-utilization"
-+        )
-+        self.assertEqual(len(dom_get_child_elements(node)), 1)
-+        self.assertEqual(child, child2)
- 
--        parsed = utils.parse_quorumtool_output("""\
--Quorum information
--------------------
--Date:             Fri Jan 16 13:03:28 2015
--Quorum provider:  corosync_votequorum
--Nodes:            3
--Node ID:          1
--Ring ID:          19860
--Quorate:          Yes
-+    def test_dom_update_nv_pair_add(self):
-+        nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement
-+        utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-")
-+        self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
-+        pair = dom_get_child_elements(nv_set)[0]
-+        self.assertEqual(pair.getAttribute("name"), "test_name")
-+        self.assertEqual(pair.getAttribute("value"), "test_val")
-+        self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
-+        utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-")
-+        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
-+        self.assertEqual(pair, dom_get_child_elements(nv_set)[0])
-+        pair = dom_get_child_elements(nv_set)[1]
-+        self.assertEqual(pair.getAttribute("name"), "another_name")
-+        self.assertEqual(pair.getAttribute("value"), "value")
-+        self.assertEqual(pair.getAttribute("id"), "prefix2-another_name")
- 
--Votequorum information
------------------------
--Expected votes:   3
--Highest expected: 3
--Total votes:      3
--Quorum:           2
--Flags:            Quorate
-+    def test_dom_update_nv_pair_update(self):
-+        nv_set = xml.dom.minidom.parseString("""
-+        <nv_set>
-+            <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
-+            <nvpair id="prefix2-another_name" name="another_name" value="value"/>
-+        </nv_set>
-+        """).documentElement
-+        utils.dom_update_nv_pair(nv_set, "test_name", "new_value")
-+        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
-+        pair1 = dom_get_child_elements(nv_set)[0]
-+        pair2 = dom_get_child_elements(nv_set)[1]
-+        self.assertEqual(pair1.getAttribute("name"), "test_name")
-+        self.assertEqual(pair1.getAttribute("value"), "new_value")
-+        self.assertEqual(pair1.getAttribute("id"), "prefix-test_name")
-+        self.assertEqual(pair2.getAttribute("name"), "another_name")
-+        self.assertEqual(pair2.getAttribute("value"), "value")
-+        self.assertEqual(pair2.getAttribute("id"), "prefix2-another_name")
- 
--Membership information
------------------------
--    Nodeid      Votes    Qdevice Name
--         1          1         NR rh70-node1 (local)
--         2        foo         NR rh70-node2
--         3          1         NR rh70-node3
--""")
--        self.assertEqual(None, parsed)
-+    def test_dom_update_nv_pair_remove(self):
-+        nv_set = xml.dom.minidom.parseString("""
-+        <nv_set>
-+            <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
-+            <nvpair id="prefix2-another_name" name="another_name" value="value"/>
-+        </nv_set>
-+        """).documentElement
-+        utils.dom_update_nv_pair(nv_set, "non_existing_name", "")
-+        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
-+        utils.dom_update_nv_pair(nv_set, "another_name", "")
-+        self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
-+        pair = dom_get_child_elements(nv_set)[0]
-+        self.assertEqual(pair.getAttribute("name"), "test_name")
-+        self.assertEqual(pair.getAttribute("value"), "test_val")
-+        self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
-+        utils.dom_update_nv_pair(nv_set, "test_name", "")
-+        self.assertEqual(len(dom_get_child_elements(nv_set)), 0)
- 
--    def test_is_node_stop_cause_quorum_loss(self):
--        quorum_info = {
--            "quorate": False,
--        }
-+    def test_convert_args_to_tuples(self):
-+        out = utils.convert_args_to_tuples(
-+            ["invalid_string", "key=value", "key2=val=ue", "k e y= v a l u e "]
-+        )
-         self.assertEqual(
--            False,
--            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+            out,
-+            [("key", "value"), ("key2", "val=ue"), ("k e y", " v a l u e ")]
-         )
- 
--        quorum_info = {
--            "quorate": True,
--            "quorum": 1,
--            "node_list": [
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
--        self.assertEqual(
--            False,
--            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+    def test_dom_update_utilization_invalid(self):
-+        #commands writes to stderr
-+        #we want clean test output, so we capture it
-+        tmp_stderr = sys.stderr
-+        sys.stderr = StringIO()
-+
-+        el = xml.dom.minidom.parseString("""
-+        <resource id="test_id"/>
-+        """).documentElement
-+        self.assertRaises(
-+            SystemExit,
-+            utils.dom_update_utilization, el, [("name", "invalid_val")]
-         )
- 
--        quorum_info = {
--            "quorate": True,
--            "quorum": 1,
--            "node_list": [
--                {"name": "rh70-node3", "votes": 1, "local": True},
--            ],
--        }
--        self.assertEqual(
--            True,
--            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+        self.assertRaises(
-+            SystemExit,
-+            utils.dom_update_utilization, el, [("name", "0.01")]
-         )
- 
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 3, "local": False},
--                {"name": "rh70-node2", "votes": 2, "local": False},
--                {"name": "rh70-node3", "votes": 1, "local": True},
--            ],
--        }
--        self.assertEqual(
--            False,
--            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+        sys.stderr = tmp_stderr
-+
-+    def test_dom_update_utilization_add(self):
-+        el = xml.dom.minidom.parseString("""
-+        <resource id="test_id"/>
-+        """).documentElement
-+        utils.dom_update_utilization(
-+            el, [("name", ""), ("key", "-1"), ("keys", "90")]
-         )
- 
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 3, "local": False},
--                {"name": "rh70-node2", "votes": 2, "local": True},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
-+        self.assertEqual(len(dom_get_child_elements(el)), 1)
-+        u = dom_get_child_elements(el)[0]
-+        self.assertEqual(u.tagName, "utilization")
-+        self.assertEqual(u.getAttribute("id"), "test_id-utilization")
-+        self.assertEqual(len(dom_get_child_elements(u)), 2)
-+
-         self.assertEqual(
--            False,
--            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+            dom_get_child_elements(u)[0].getAttribute("id"),
-+            "test_id-utilization-key"
-+        )
-+        self.assertEqual(
-+            dom_get_child_elements(u)[0].getAttribute("name"),
-+            "key"
-+        )
-+        self.assertEqual(
-+            dom_get_child_elements(u)[0].getAttribute("value"),
-+            "-1"
-         )
--
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 3, "local": True},
--                {"name": "rh70-node2", "votes": 2, "local": False},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
-         self.assertEqual(
--            True,
--            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+            dom_get_child_elements(u)[1].getAttribute("id"),
-+            "test_id-utilization-keys"
-         )
--
--
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 3, "local": True},
--                {"name": "rh70-node2", "votes": 2, "local": False},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
-         self.assertEqual(
--            False,
--            utils.is_node_stop_cause_quorum_loss(
--                quorum_info, False, ["rh70-node3"]
--            )
-+            dom_get_child_elements(u)[1].getAttribute("name"),
-+            "keys"
-         )
--
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 3, "local": True},
--                {"name": "rh70-node2", "votes": 2, "local": False},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
-         self.assertEqual(
--            False,
--            utils.is_node_stop_cause_quorum_loss(
--                quorum_info, False, ["rh70-node2"]
--            )
-+            dom_get_child_elements(u)[1].getAttribute("value"),
-+            "90"
-         )
- 
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 3, "local": True},
--                {"name": "rh70-node2", "votes": 2, "local": False},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
--        self.assertEqual(
--            True,
--            utils.is_node_stop_cause_quorum_loss(
--                quorum_info, False, ["rh70-node1"]
--            )
-+    def test_dom_update_utilization_update_remove(self):
-+        el = xml.dom.minidom.parseString("""
-+        <resource id="test_id">
-+            <utilization id="test_id-utilization">
-+                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
-+                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
-+            </utilization>
-+        </resource>
-+        """).documentElement
-+        utils.dom_update_utilization(
-+            el, [("key", "100"), ("keys", "")]
-         )
- 
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 4, "local": True},
--                {"name": "rh70-node2", "votes": 1, "local": False},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
-+        u = dom_get_child_elements(el)[0]
-+        self.assertEqual(len(dom_get_child_elements(u)), 1)
-         self.assertEqual(
--            False,
--            utils.is_node_stop_cause_quorum_loss(
--                quorum_info, False, ["rh70-node2", "rh70-node3"]
--            )
-+            dom_get_child_elements(u)[0].getAttribute("id"),
-+            "test_id-utilization-key"
-         )
--
--        quorum_info = {
--            "quorate": True,
--            "quorum": 4,
--            "node_list": [
--                {"name": "rh70-node1", "votes": 3, "local": True},
--                {"name": "rh70-node2", "votes": 2, "local": False},
--                {"name": "rh70-node3", "votes": 1, "local": False},
--            ],
--        }
-         self.assertEqual(
--            True,
--            utils.is_node_stop_cause_quorum_loss(
--                quorum_info, False, ["rh70-node2", "rh70-node3"]
--            )
-+            dom_get_child_elements(u)[0].getAttribute("name"),
-+            "key"
-         )
--
--    def test_get_operations_from_transitions(self):
--        transitions = utils.parse(rc("transitions01.xml"))
-         self.assertEqual(
--            [
--                {
--                    'id': 'dummy',
--                    'long_id': 'dummy',
--                    'operation': 'stop',
--                    'on_node': 'rh7-3',
--                },
--                {
--                    'id': 'dummy',
--                    'long_id': 'dummy',
--                    'operation': 'start',
--                    'on_node': 'rh7-2',
--                },
--                {
--                    'id': 'd0',
--                    'long_id': 'd0:1',
--                    'operation': 'stop',
--                    'on_node': 'rh7-1',
--                },
--                {
--                    'id': 'd0',
--                    'long_id': 'd0:1',
--                    'operation': 'start',
--                    'on_node': 'rh7-2',
--                },
--                {
--                    'id': 'state',
--                    'long_id': 'state:0',
--                    'operation': 'stop',
--                    'on_node': 'rh7-3',
--                },
--                {
--                    'id': 'state',
--                    'long_id': 'state:0',
--                    'operation': 'start',
--                    'on_node': 'rh7-2',
--                },
--            ],
--            utils.get_operations_from_transitions(transitions)
-+            dom_get_child_elements(u)[0].getAttribute("value"),
-+            "100"
-         )
- 
--        transitions = utils.parse(rc("transitions02.xml"))
--        self.assertEqual(
--            [
--                {
--                    "id": "RemoteNode",
--                    "long_id": "RemoteNode",
--                    "operation": "stop",
--                    "on_node": "virt-143",
--                },
--                {
--                    "id": "RemoteNode",
--                    "long_id": "RemoteNode",
--                    "operation": "migrate_to",
--                    "on_node": "virt-143",
--                },
--                {
--                    "id": "RemoteNode",
--                    "long_id": "RemoteNode",
--                    "operation": "migrate_from",
--                    "on_node": "virt-142",
--                },
--                {
--                    "id": "dummy8",
--                    "long_id": "dummy8",
--                    "operation": "stop",
--                    "on_node": "virt-143",
--                },
--                {
--                    "id": "dummy8",
--                    "long_id": "dummy8",
--                    "operation": "start",
--                    "on_node": "virt-142",
--                }
--            ],
--            utils.get_operations_from_transitions(transitions)
-+    def test_dom_update_meta_attr_add(self):
-+        el = xml.dom.minidom.parseString("""
-+        <resource id="test_id"/>
-+        """).documentElement
-+        utils.dom_update_meta_attr(
-+            el, [("name", ""), ("key", "test"), ("key2", "val")]
-         )
- 
--    def test_get_resources_location_from_operations(self):
--        cib_dom = self.get_cib_resources()
-+        self.assertEqual(len(dom_get_child_elements(el)), 1)
-+        u = dom_get_child_elements(el)[0]
-+        self.assertEqual(u.tagName, "meta_attributes")
-+        self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes")
-+        self.assertEqual(len(dom_get_child_elements(u)), 2)
- 
--        operations = []
-         self.assertEqual(
--            {},
--            utils.get_resources_location_from_operations(cib_dom, operations)
-+            dom_get_child_elements(u)[0].getAttribute("id"),
-+            "test_id-meta_attributes-key"
-         )
--
--        operations = [
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "start",
--                "on_node": "rh7-1",
--            },
--        ]
-         self.assertEqual(
--            {
--                'myResource': {
--                    'id': 'myResource',
--                    'id_for_constraint': 'myResource',
--                    'long_id': 'myResource',
--                    'start_on_node': 'rh7-1',
--                 },
--            },
--            utils.get_resources_location_from_operations(cib_dom, operations)
-+            dom_get_child_elements(u)[0].getAttribute("name"),
-+            "key"
-+        )
-+        self.assertEqual(
-+            dom_get_child_elements(u)[0].getAttribute("value"),
-+            "test"
-         )
--
--        operations = [
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "start",
--                "on_node": "rh7-1",
--            },
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "start",
--                "on_node": "rh7-2",
--            },
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "monitor",
--                "on_node": "rh7-3",
--            },
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "stop",
--                "on_node": "rh7-3",
--            },
--        ]
-         self.assertEqual(
--            {
--                'myResource': {
--                    'id': 'myResource',
--                    'id_for_constraint': 'myResource',
--                    'long_id': 'myResource',
--                    'start_on_node': 'rh7-2',
--                 },
--            },
--            utils.get_resources_location_from_operations(cib_dom, operations)
-+            dom_get_child_elements(u)[1].getAttribute("id"),
-+            "test_id-meta_attributes-key2"
-         )
--
--        operations = [
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "start",
--                "on_node": "rh7-1",
--            },
--            {
--                "id": "myClonedResource",
--                "long_id": "myClonedResource:0",
--                "operation": "start",
--                "on_node": "rh7-1",
--            },
--            {
--                "id": "myClonedResource",
--                "long_id": "myClonedResource:0",
--                "operation": "start",
--                "on_node": "rh7-2",
--            },
--            {
--                "id": "myClonedResource",
--                "long_id": "myClonedResource:1",
--                "operation": "start",
--                "on_node": "rh7-3",
--            },
--        ]
-         self.assertEqual(
--            {
--                'myResource': {
--                    'id': 'myResource',
--                    'id_for_constraint': 'myResource',
--                    'long_id': 'myResource',
--                    'start_on_node': 'rh7-1',
--                 },
--                'myClonedResource:0': {
--                    'id': 'myClonedResource',
--                    'id_for_constraint': 'myClone',
--                    'long_id': 'myClonedResource:0',
--                    'start_on_node': 'rh7-2',
--                 },
--                'myClonedResource:1': {
--                    'id': 'myClonedResource',
--                    'id_for_constraint': 'myClone',
--                    'long_id': 'myClonedResource:1',
--                    'start_on_node': 'rh7-3',
--                 },
--            },
--            utils.get_resources_location_from_operations(cib_dom, operations)
-+            dom_get_child_elements(u)[1].getAttribute("name"),
-+            "key2"
-         )
--
--        operations = [
--            {
--                "id": "myUniqueClonedResource:0",
--                "long_id": "myUniqueClonedResource:0",
--                "operation": "start",
--                "on_node": "rh7-1",
--            },
--            {
--                "id": "myUniqueClonedResource:1",
--                "long_id": "myUniqueClonedResource:1",
--                "operation": "monitor",
--                "on_node": "rh7-2",
--            },
--            {
--                "id": "myUniqueClonedResource:2",
--                "long_id": "myUniqueClonedResource:2",
--                "operation": "start",
--                "on_node": "rh7-3",
--            },
--        ]
-         self.assertEqual(
--            {
--                'myUniqueClonedResource:0': {
--                    'id': 'myUniqueClonedResource:0',
--                    'id_for_constraint': 'myUniqueClone',
--                    'long_id': 'myUniqueClonedResource:0',
--                    'start_on_node': 'rh7-1',
--                 },
--                'myUniqueClonedResource:2': {
--                    'id': 'myUniqueClonedResource:2',
--                    'id_for_constraint': 'myUniqueClone',
--                    'long_id': 'myUniqueClonedResource:2',
--                    'start_on_node': 'rh7-3',
--                 },
--            },
--            utils.get_resources_location_from_operations(cib_dom, operations)
-+            dom_get_child_elements(u)[1].getAttribute("value"),
-+            "val"
-         )
- 
--        operations = [
--            {
--                "id": "myMasteredGroupedResource",
--                "long_id": "myMasteredGroupedResource:0",
--                "operation": "start",
--                "on_node": "rh7-1",
--            },
--            {
--                "id": "myMasteredGroupedResource",
--                "long_id": "myMasteredGroupedResource:1",
--                "operation": "demote",
--                "on_node": "rh7-2",
--            },
--            {
--                "id": "myMasteredGroupedResource",
--                "long_id": "myMasteredGroupedResource:1",
--                "operation": "promote",
--                "on_node": "rh7-3",
--            },
--        ]
-+    def test_dom_update_meta_attr_update_remove(self):
-+        el = xml.dom.minidom.parseString("""
-+        <resource id="test_id">
-+            <meta_attributes id="test_id-utilization">
-+                <nvpair id="test_id-meta_attributes-key" name="key" value="test"/>
-+                <nvpair id="test_id-meta_attributes-key2" name="key2" value="val"/>
-+            </meta_attributes>
-+        </resource>
-+        """).documentElement
-+        utils.dom_update_meta_attr(
-+            el, [("key", "another_val"), ("key2", "")]
-+        )
-+
-+        u = dom_get_child_elements(el)[0]
-+        self.assertEqual(len(dom_get_child_elements(u)), 1)
-         self.assertEqual(
--            {
--                'myMasteredGroupedResource:0': {
--                    'id': 'myMasteredGroupedResource',
--                    'id_for_constraint': 'myGroupMaster',
--                    'long_id': 'myMasteredGroupedResource:0',
--                    'start_on_node': 'rh7-1',
--                 },
--                'myMasteredGroupedResource:1': {
--                    'id': 'myMasteredGroupedResource',
--                    'id_for_constraint': 'myGroupMaster',
--                    'long_id': 'myMasteredGroupedResource:1',
--                    'promote_on_node': 'rh7-3',
--                 },
--            },
--            utils.get_resources_location_from_operations(cib_dom, operations)
-+            dom_get_child_elements(u)[0].getAttribute("id"),
-+            "test_id-meta_attributes-key"
-+        )
-+        self.assertEqual(
-+            dom_get_child_elements(u)[0].getAttribute("name"),
-+            "key"
-+        )
-+        self.assertEqual(
-+            dom_get_child_elements(u)[0].getAttribute("value"),
-+            "another_val"
-         )
- 
--        operations = [
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "stop",
--                "on_node": "rh7-1",
--            },
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "migrate_to",
--                "on_node": "rh7-1",
--            },
--            {
--                "id": "myResource",
--                "long_id": "myResource",
--                "operation": "migrate_from",
--                "on_node": "rh7-2",
-+    def test_get_utilization(self):
-+        el = xml.dom.minidom.parseString("""
-+        <resource id="test_id">
-+            <utilization id="test_id-utilization">
-+                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
-+                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
-+            </utilization>
-+        </resource>
-+        """).documentElement
-+        self.assertEqual({"key": "-1", "keys": "90"}, utils.get_utilization(el))
-+
-+    def test_get_utilization_str(self):
-+        el = xml.dom.minidom.parseString("""
-+        <resource id="test_id">
-+            <utilization id="test_id-utilization">
-+                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
-+                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
-+            </utilization>
-+        </resource>
-+        """).documentElement
-+        self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el))
-+
-+    def test_get_cluster_property_from_xml_enum(self):
-+        el = ET.fromstring("""
-+        <parameter name="no-quorum-policy" unique="0">
-+            <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
-+            <content type="enum" default="stop"/>
-+            <longdesc lang="en">What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide</longdesc>
-+        </parameter>
-+        """)
-+        expected = {
-+            "name": "no-quorum-policy",
-+            "shortdesc": "What to do when the cluster does not have quorum",
-+            "longdesc": "",
-+            "type": "enum",
-+            "default": "stop",
-+            "enum": ["stop", "freeze", "ignore", "suicide"]
-+        }
-+        self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
-+
-+    def test_get_cluster_property_from_xml(self):
-+        el = ET.fromstring("""
-+        <parameter name="default-resource-stickiness" unique="0">
-+            <shortdesc lang="en"></shortdesc>
-+            <content type="integer" default="0"/>
-+            <longdesc lang="en"></longdesc>
-+        </parameter>
-+        """)
-+        expected = {
-+            "name": "default-resource-stickiness",
-+            "shortdesc": "",
-+            "longdesc": "",
-+            "type": "integer",
-+            "default": "0"
-+        }
-+        self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
-+
-+    def test_get_cluster_property_default(self):
-+        definition = {
-+            "default-resource-stickiness": {
-+                "name": "default-resource-stickiness",
-+                "shortdesc": "",
-+                "longdesc": "",
-+                "type": "integer",
-+                "default": "0",
-+                "source": "pengine"
-             },
--        ]
--        self.assertEqual(
--            {
--                "myResource": {
--                    "id": "myResource",
--                    "id_for_constraint": "myResource",
--                    "long_id": "myResource",
--                    "start_on_node": "rh7-2",
--                },
-+            "no-quorum-policy": {
-+                "name": "no-quorum-policy",
-+                "shortdesc": "What to do when the cluster does not have quorum",
-+                "longdesc": "What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide",
-+                "type": "enum",
-+                "default": "stop",
-+                "enum": ["stop", "freeze", "ignore", "suicide"],
-+                "source": "pengine"
-             },
--            utils.get_resources_location_from_operations(cib_dom, operations)
-+            "enable-acl": {
-+                "name": "enable-acl",
-+                "shortdesc": "Enable CIB ACL",
-+                "longdesc": "Enable CIB ACL",
-+                "type": "boolean",
-+                "default": "false",
-+                "source": "cib"
-+            }
-+        }
-+        self.assertEqual(
-+            utils.get_cluster_property_default(
-+                definition, "default-resource-stickiness"
-+            ),
-+            "0"
-         )
--
--    def test_is_int(self):
--        self.assertTrue(utils.is_int("-999"))
--        self.assertTrue(utils.is_int("-1"))
--        self.assertTrue(utils.is_int("0"))
--        self.assertTrue(utils.is_int("1"))
--        self.assertTrue(utils.is_int("99999"))
--        self.assertTrue(utils.is_int(" 99999  "))
--        self.assertFalse(utils.is_int("0.0"))
--        self.assertFalse(utils.is_int("-1.0"))
--        self.assertFalse(utils.is_int("-0.1"))
--        self.assertFalse(utils.is_int("0.001"))
--        self.assertFalse(utils.is_int("-999999.1"))
--        self.assertFalse(utils.is_int("0.0001"))
--        self.assertFalse(utils.is_int(""))
--        self.assertFalse(utils.is_int("   "))
--        self.assertFalse(utils.is_int("A"))
--        self.assertFalse(utils.is_int("random 15 47 text  "))
--
--    def test_dom_get_node(self):
--        cib = self.get_cib_with_nodes_minidom()
--        #assertIsNone is not supported in python 2.6
--        self.assertTrue(utils.dom_get_node(cib, "non-existing-node") is None)
--        node = utils.dom_get_node(cib, "rh7-1")
--        self.assertEqual(node.getAttribute("uname"), "rh7-1")
--        self.assertEqual(node.getAttribute("id"), "1")
--
--    def test_dom_prepare_child_element(self):
--        cib = self.get_cib_with_nodes_minidom()
--        node = cib.getElementsByTagName("node")[0]
--        self.assertEqual(len(dom_get_child_elements(node)), 0)
--        child = utils.dom_prepare_child_element(
--            node, "utilization", "rh7-1-utilization"
-+        self.assertEqual(
-+            utils.get_cluster_property_default(definition, "no-quorum-policy"),
-+            "stop"
-         )
--        self.assertEqual(len(dom_get_child_elements(node)), 1)
--        self.assertEqual(child, dom_get_child_elements(node)[0])
--        self.assertEqual(dom_get_child_elements(node)[0].tagName, "utilization")
-         self.assertEqual(
--            dom_get_child_elements(node)[0].getAttribute("id"),
--            "rh7-1-utilization"
-+            utils.get_cluster_property_default(definition, "enable-acl"),
-+            "false"
-         )
--        child2 = utils.dom_prepare_child_element(
--            node, "utilization", "rh7-1-utilization"
-+        self.assertRaises(
-+            utils.UnknownPropertyException,
-+            utils.get_cluster_property_default, definition, "non-existing"
-         )
--        self.assertEqual(len(dom_get_child_elements(node)), 1)
--        self.assertEqual(child, child2)
- 
--    def test_dom_update_nv_pair_add(self):
--        nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement
--        utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-")
--        self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
--        pair = dom_get_child_elements(nv_set)[0]
--        self.assertEqual(pair.getAttribute("name"), "test_name")
--        self.assertEqual(pair.getAttribute("value"), "test_val")
--        self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
--        utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-")
--        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
--        self.assertEqual(pair, dom_get_child_elements(nv_set)[0])
--        pair = dom_get_child_elements(nv_set)[1]
--        self.assertEqual(pair.getAttribute("name"), "another_name")
--        self.assertEqual(pair.getAttribute("value"), "value")
--        self.assertEqual(pair.getAttribute("id"), "prefix2-another_name")
-+    def test_is_valid_cib_value_unknown_type(self):
-+        # should be always true
-+        self.assertTrue(utils.is_valid_cib_value("unknown", "test"))
-+        self.assertTrue(utils.is_valid_cib_value("string", "string value"))
- 
--    def test_dom_update_nv_pair_update(self):
--        nv_set = xml.dom.minidom.parseString("""
--        <nv_set>
--            <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
--            <nvpair id="prefix2-another_name" name="another_name" value="value"/>
--        </nv_set>
--        """).documentElement
--        utils.dom_update_nv_pair(nv_set, "test_name", "new_value")
--        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
--        pair1 = dom_get_child_elements(nv_set)[0]
--        pair2 = dom_get_child_elements(nv_set)[1]
--        self.assertEqual(pair1.getAttribute("name"), "test_name")
--        self.assertEqual(pair1.getAttribute("value"), "new_value")
--        self.assertEqual(pair1.getAttribute("id"), "prefix-test_name")
--        self.assertEqual(pair2.getAttribute("name"), "another_name")
--        self.assertEqual(pair2.getAttribute("value"), "value")
--        self.assertEqual(pair2.getAttribute("id"), "prefix2-another_name")
-+    def test_is_valid_cib_value_integer(self):
-+        self.assertTrue(utils.is_valid_cib_value("integer", "0"))
-+        self.assertTrue(utils.is_valid_cib_value("integer", "42"))
-+        self.assertTrue(utils.is_valid_cib_value("integer", "-90"))
-+        self.assertTrue(utils.is_valid_cib_value("integer", "+90"))
-+        self.assertTrue(utils.is_valid_cib_value("integer", "INFINITY"))
-+        self.assertTrue(utils.is_valid_cib_value("integer", "-INFINITY"))
-+        self.assertTrue(utils.is_valid_cib_value("integer", "+INFINITY"))
-+        self.assertFalse(utils.is_valid_cib_value("integer", "0.0"))
-+        self.assertFalse(utils.is_valid_cib_value("integer", "-10.9"))
-+        self.assertFalse(utils.is_valid_cib_value("integer", "string"))
- 
--    def test_dom_update_nv_pair_remove(self):
--        nv_set = xml.dom.minidom.parseString("""
--        <nv_set>
--            <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
--            <nvpair id="prefix2-another_name" name="another_name" value="value"/>
--        </nv_set>
--        """).documentElement
--        utils.dom_update_nv_pair(nv_set, "non_existing_name", "")
--        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
--        utils.dom_update_nv_pair(nv_set, "another_name", "")
--        self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
--        pair = dom_get_child_elements(nv_set)[0]
--        self.assertEqual(pair.getAttribute("name"), "test_name")
--        self.assertEqual(pair.getAttribute("value"), "test_val")
--        self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
--        utils.dom_update_nv_pair(nv_set, "test_name", "")
--        self.assertEqual(len(dom_get_child_elements(nv_set)), 0)
-+    def test_is_valid_cib_value_enum(self):
-+        self.assertTrue(
-+            utils.is_valid_cib_value("enum", "this", ["another", "this", "1"])
-+        )
-+        self.assertFalse(
-+            utils.is_valid_cib_value("enum", "this", ["another", "this_not"])
-+        )
-+        self.assertFalse(utils.is_valid_cib_value("enum", "this", []))
-+        self.assertFalse(utils.is_valid_cib_value("enum", "this"))
- 
--    def test_convert_args_to_tuples(self):
--        out = utils.convert_args_to_tuples(
--            ["invalid_string", "key=value", "key2=val=ue", "k e y= v a l u e "]
-+    def test_is_valid_cib_value_boolean(self):
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "true"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "TrUe"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "TRUE"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "yes"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "on"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "y"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "Y"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "1"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "false"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "FaLse"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "FALSE"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "off"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "no"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "N"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "n"))
-+        self.assertTrue(utils.is_valid_cib_value("boolean", "0"))
-+        self.assertFalse(utils.is_valid_cib_value("boolean", "-1"))
-+        self.assertFalse(utils.is_valid_cib_value("boolean", "not"))
-+        self.assertFalse(utils.is_valid_cib_value("boolean", "random_string"))
-+        self.assertFalse(utils.is_valid_cib_value("boolean", "truth"))
-+
-+    def test_is_valid_cib_value_time(self):
-+        self.assertTrue(utils.is_valid_cib_value("time", "10"))
-+        self.assertTrue(utils.is_valid_cib_value("time", "0"))
-+        self.assertTrue(utils.is_valid_cib_value("time", "9s"))
-+        self.assertTrue(utils.is_valid_cib_value("time", "10sec"))
-+        self.assertTrue(utils.is_valid_cib_value("time", "10min"))
-+        self.assertTrue(utils.is_valid_cib_value("time", "10m"))
-+        self.assertTrue(utils.is_valid_cib_value("time", "10h"))
-+        self.assertTrue(utils.is_valid_cib_value("time", "10hr"))
-+        self.assertFalse(utils.is_valid_cib_value("time", "5.2"))
-+        self.assertFalse(utils.is_valid_cib_value("time", "-10"))
-+        self.assertFalse(utils.is_valid_cib_value("time", "10m 2s"))
-+        self.assertFalse(utils.is_valid_cib_value("time", "hour"))
-+        self.assertFalse(utils.is_valid_cib_value("time", "day"))
-+
-+    def test_validate_cluster_property(self):
-+        definition = {
-+            "default-resource-stickiness": {
-+                "name": "default-resource-stickiness",
-+                "shortdesc": "",
-+                "longdesc": "",
-+                "type": "integer",
-+                "default": "0",
-+                "source": "pengine"
-+            },
-+            "no-quorum-policy": {
-+                "name": "no-quorum-policy",
-+                "shortdesc": "What to do when the cluster does not have quorum",
-+                "longdesc": "What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide",
-+                "type": "enum",
-+                "default": "stop",
-+                "enum": ["stop", "freeze", "ignore", "suicide"],
-+                "source": "pengine"
-+            },
-+            "enable-acl": {
-+                "name": "enable-acl",
-+                "shortdesc": "Enable CIB ACL",
-+                "longdesc": "Enable CIB ACL",
-+                "type": "boolean",
-+                "default": "false",
-+                "source": "cib"
-+            }
-+        }
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "default-resource-stickiness", "10"
-+        ))
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "default-resource-stickiness", "-1"
-+        ))
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "no-quorum-policy", "freeze"
-+        ))
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "no-quorum-policy", "suicide"
-+        ))
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "enable-acl", "true"
-+        ))
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "enable-acl", "false"
-+        ))
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "enable-acl", "on"
-+        ))
-+        self.assertTrue(utils.is_valid_cluster_property(
-+            definition, "enable-acl", "OFF"
-+        ))
-+        self.assertFalse(utils.is_valid_cluster_property(
-+            definition, "default-resource-stickiness", "test"
-+        ))
-+        self.assertFalse(utils.is_valid_cluster_property(
-+            definition, "default-resource-stickiness", "1.2"
-+        ))
-+        self.assertFalse(utils.is_valid_cluster_property(
-+            definition, "no-quorum-policy", "invalid"
-+        ))
-+        self.assertFalse(utils.is_valid_cluster_property(
-+            definition, "enable-acl", "not"
-+        ))
-+        self.assertRaises(
-+            utils.UnknownPropertyException,
-+            utils.is_valid_cluster_property, definition, "unknown", "value"
-         )
--        self.assertEqual(
--            out,
--            [("key", "value"), ("key2", "val=ue"), ("k e y", " v a l u e ")]
-+
-+    def assert_element_id(self, node, node_id):
-+        self.assertTrue(
-+            isinstance(node, xml.dom.minidom.Element),
-+            "element with id '%s' not found" % node_id
-         )
-+        self.assertEqual(node.getAttribute("id"), node_id)
- 
--    def test_dom_update_utilization_invalid(self):
--        #commands writes to stderr
--        #we want clean test output, so we capture it
--        tmp_stderr = sys.stderr
--        sys.stderr = StringIO()
- 
--        el = xml.dom.minidom.parseString("""
--        <resource id="test_id"/>
--        """).documentElement
--        self.assertRaises(
--            SystemExit,
--            utils.dom_update_utilization, el, [("name", "invalid_val")]
--        )
-+class RunParallelTest(unittest.TestCase):
-+    def fixture_create_worker(self, log, name, sleepSeconds=0):
-+        def worker():
-+            sleep(sleepSeconds)
-+            log.append(name)
-+        return worker
- 
--        self.assertRaises(
--            SystemExit,
--            utils.dom_update_utilization, el, [("name", "0.01")]
-+    def test_run_all_workers(self):
-+        log = []
-+        utils.run_parallel(
-+            [
-+                self.fixture_create_worker(log, 'first'),
-+                self.fixture_create_worker(log, 'second'),
-+            ],
-+            wait_seconds=.1
-         )
- 
--        sys.stderr = tmp_stderr
-+        self.assertEqual(log, ['first', 'second'])
- 
--    def test_dom_update_utilization_add(self):
--        el = xml.dom.minidom.parseString("""
--        <resource id="test_id"/>
--        """).documentElement
--        utils.dom_update_utilization(
--            el, [("name", ""), ("key", "-1"), ("keys", "90")]
-+    def test_wait_for_slower_workers(self):
-+        log = []
-+        utils.run_parallel(
-+            [
-+                self.fixture_create_worker(log, 'first', .03),
-+                self.fixture_create_worker(log, 'second'),
-+            ],
-+            wait_seconds=.01
-         )
- 
--        self.assertEqual(len(dom_get_child_elements(el)), 1)
--        u = dom_get_child_elements(el)[0]
--        self.assertEqual(u.tagName, "utilization")
--        self.assertEqual(u.getAttribute("id"), "test_id-utilization")
--        self.assertEqual(len(dom_get_child_elements(u)), 2)
-+        self.assertEqual(log, ['second', 'first'])
-+
- 
-+class PrepareNodeNamesTest(unittest.TestCase):
-+    def test_return_original_when_is_in_pacemaker_nodes(self):
-+        node = 'test'
-         self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("id"),
--            "test_id-utilization-key"
-+            node,
-+            utils.prepare_node_name(node, {1: node}, {})
-         )
-+
-+    def test_return_original_when_is_not_in_corosync_nodes(self):
-+        node = 'test'
-         self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("name"),
--            "key"
-+            node,
-+            utils.prepare_node_name(node, {}, {})
-         )
-+
-+    def test_return_original_when_corosync_id_not_in_pacemaker(self):
-+        node = 'test'
-         self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("value"),
--            "-1"
-+            node,
-+            utils.prepare_node_name(node, {}, {1: node})
-         )
-+
-+    def test_return_modified_name(self):
-+        node = 'test'
-         self.assertEqual(
--            dom_get_child_elements(u)[1].getAttribute("id"),
--            "test_id-utilization-keys"
-+            'another (test)',
-+            utils.prepare_node_name(node, {1: 'another'}, {1: node})
-         )
-+
-+    def test_return_modified_name_with_pm_null_case(self):
-+        node = 'test'
-         self.assertEqual(
--            dom_get_child_elements(u)[1].getAttribute("name"),
--            "keys"
-+            '*Unknown* (test)',
-+            utils.prepare_node_name(node, {1: '(null)'}, {1: node})
-+        )
-+
-+
-+class NodeActionTaskTest(unittest.TestCase):
-+    def test_can_run_action(self):
-+        def action(node, arg, kwarg=None):
-+            return (0, ':'.join([node, arg, kwarg]))
-+
-+        report_list = []
-+        def report(node, returncode, output):
-+            report_list.append('|'.join([node, str(returncode), output]))
-+
-+        task = utils.create_task(report, action, 'node', 'arg', kwarg='kwarg')
-+        task()
-+
-+        self.assertEqual(['node|0|node:arg:kwarg'], report_list)
-+
-+
-+class ParseCmanQuorumInfoTest(unittest.TestCase):
-+    def test_error_empty_string(self):
-+        parsed = utils.parse_cman_quorum_info("")
-+        self.assertEqual(None, parsed)
-+
-+    def test_quorate_no_qdevice(self):
-+        parsed = utils.parse_cman_quorum_info("""\
-+Version: 6.2.0
-+Config Version: 23
-+Cluster Name: cluster66
-+Cluster Id: 22265
-+Cluster Member: Yes
-+Cluster Generation: 3612
-+Membership state: Cluster-Member
-+Nodes: 3
-+Expected votes: 3
-+Total votes: 3
-+Node votes: 1
-+Quorum: 2 
-+Active subsystems: 8
-+Flags: 
-+Ports Bound: 0 
-+Node name: rh66-node2
-+Node ID: 2
-+Multicast addresses: 239.192.86.80
-+Node addresses: 192.168.122.61
-+---Votes---
-+1 M 3 rh66-node1
-+2 M 2 rh66-node2
-+3 M 1 rh66-node3
-+""")
-+        self.assertEqual(True, parsed["quorate"])
-+        self.assertEqual(2, parsed["quorum"])
-+        self.assertEqual(
-+            [
-+                {"name": "rh66-node1", "votes": 3, "local": False},
-+                {"name": "rh66-node2", "votes": 2, "local": True},
-+                {"name": "rh66-node3", "votes": 1, "local": False},
-+            ],
-+            parsed["node_list"]
-         )
-+        self.assertEqual([], parsed["qdevice_list"])
-+
-+    def test_no_quorate_no_qdevice(self):
-+        parsed = utils.parse_cman_quorum_info("""\
-+Version: 6.2.0
-+Config Version: 23
-+Cluster Name: cluster66
-+Cluster Id: 22265
-+Cluster Member: Yes
-+Cluster Generation: 3612
-+Membership state: Cluster-Member
-+Nodes: 3
-+Expected votes: 3
-+Total votes: 3
-+Node votes: 1
-+Quorum: 2 Activity blocked
-+Active subsystems: 8
-+Flags: 
-+Ports Bound: 0 
-+Node name: rh66-node1
-+Node ID: 1
-+Multicast addresses: 239.192.86.80
-+Node addresses: 192.168.122.61
-+---Votes---
-+1 M 3 rh66-node1
-+2 X 2 rh66-node2
-+3 X 1 rh66-node3
-+""")
-+        self.assertEqual(False, parsed["quorate"])
-+        self.assertEqual(2, parsed["quorum"])
-         self.assertEqual(
--            dom_get_child_elements(u)[1].getAttribute("value"),
--            "90"
-+            [
-+                {"name": "rh66-node1", "votes": 3, "local": True},
-+            ],
-+            parsed["node_list"]
-         )
-+        self.assertEqual([], parsed["qdevice_list"])
-+
-+    def test_error_missing_quorum(self):
-+        parsed = utils.parse_cman_quorum_info("""\
-+Version: 6.2.0
-+Config Version: 23
-+Cluster Name: cluster66
-+Cluster Id: 22265
-+Cluster Member: Yes
-+Cluster Generation: 3612
-+Membership state: Cluster-Member
-+Nodes: 3
-+Expected votes: 3
-+Total votes: 3
-+Node votes: 1
-+Quorum: 
-+Active subsystems: 8
-+Flags: 
-+Ports Bound: 0 
-+Node name: rh66-node2
-+Node ID: 2
-+Multicast addresses: 239.192.86.80
-+Node addresses: 192.168.122.61
-+---Votes---
-+1 M 3 rh66-node1
-+2 M 2 rh66-node2
-+3 M 1 rh66-node3
-+""")
-+        self.assertEqual(None, parsed)
-+
-+    def test_error_quorum_garbage(self):
-+        parsed = utils.parse_cman_quorum_info("""\
-+Version: 6.2.0
-+Config Version: 23
-+Cluster Name: cluster66
-+Cluster Id: 22265
-+Cluster Member: Yes
-+Cluster Generation: 3612
-+Membership state: Cluster-Member
-+Nodes: 3
-+Expected votes: 3
-+Total votes: 3
-+Node votes: 1
-+Quorum: Foo
-+Active subsystems: 8
-+Flags: 
-+Ports Bound: 0 
-+Node name: rh66-node2
-+Node ID: 2
-+Multicast addresses: 239.192.86.80
-+Node addresses: 192.168.122.61
-+---Votes---
-+1 M 3 rh66-node1
-+2 M 2 rh66-node2
-+3 M 1 rh66-node3
-+""")
-+        self.assertEqual(None, parsed)
-+
-+    def test_error_node_votes_garbage(self):
-+        parsed = utils.parse_cman_quorum_info("""\
-+Version: 6.2.0
-+Config Version: 23
-+Cluster Name: cluster66
-+Cluster Id: 22265
-+Cluster Member: Yes
-+Cluster Generation: 3612
-+Membership state: Cluster-Member
-+Nodes: 3
-+Expected votes: 3
-+Total votes: 3
-+Node votes: 1
-+Quorum: 4
-+Active subsystems: 8
-+Flags: 
-+Ports Bound: 0 
-+Node name: rh66-node2
-+Node ID: 2
-+Multicast addresses: 239.192.86.80
-+Node addresses: 192.168.122.61
-+---Votes---
-+1 M 3 rh66-node1
-+2 M Foo rh66-node2
-+3 M 1 rh66-node3
-+""")
-+        self.assertEqual(None, parsed)
- 
--    def test_dom_update_utilization_update_remove(self):
--        el = xml.dom.minidom.parseString("""
--        <resource id="test_id">
--            <utilization id="test_id-utilization">
--                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
--                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
--            </utilization>
--        </resource>
--        """).documentElement
--        utils.dom_update_utilization(
--            el, [("key", "100"), ("keys", "")]
--        )
- 
--        u = dom_get_child_elements(el)[0]
--        self.assertEqual(len(dom_get_child_elements(u)), 1)
--        self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("id"),
--            "test_id-utilization-key"
--        )
--        self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("name"),
--            "key"
--        )
--        self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("value"),
--            "100"
--        )
-+class ParseQuorumtoolOutputTest(unittest.TestCase):
-+    def test_error_empty_string(self):
-+        parsed = utils.parse_quorumtool_output("")
-+        self.assertEqual(None, parsed)
- 
--    def test_dom_update_meta_attr_add(self):
--        el = xml.dom.minidom.parseString("""
--        <resource id="test_id"/>
--        """).documentElement
--        utils.dom_update_meta_attr(
--            el, [("name", ""), ("key", "test"), ("key2", "val")]
--        )
-+    def test_quorate_no_qdevice(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:28 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            3
-+Node ID:          1
-+Ring ID:          19860
-+Quorate:          Yes
- 
--        self.assertEqual(len(dom_get_child_elements(el)), 1)
--        u = dom_get_child_elements(el)[0]
--        self.assertEqual(u.tagName, "meta_attributes")
--        self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes")
--        self.assertEqual(len(dom_get_child_elements(u)), 2)
-+Votequorum information
-+----------------------
-+Expected votes:   3
-+Highest expected: 3
-+Total votes:      3
-+Quorum:           2
-+Flags:            Quorate
- 
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+         1          3         NR rh70-node1
-+         2          2         NR rh70-node2 (local)
-+         3          1         NR rh70-node3
-+""")
-+        self.assertEqual(True, parsed["quorate"])
-+        self.assertEqual(2, parsed["quorum"])
-         self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("id"),
--            "test_id-meta_attributes-key"
--        )
--        self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("name"),
--            "key"
--        )
--        self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("value"),
--            "test"
--        )
--        self.assertEqual(
--            dom_get_child_elements(u)[1].getAttribute("id"),
--            "test_id-meta_attributes-key2"
-+            [
-+                {"name": "rh70-node1", "votes": 3, "local": False},
-+                {"name": "rh70-node2", "votes": 2, "local": True},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            parsed["node_list"]
-         )
-+        self.assertEqual([], parsed["qdevice_list"])
-+
-+    def test_quorate_with_qdevice(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:28 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            3
-+Node ID:          1
-+Ring ID:          19860
-+Quorate:          Yes
-+
-+Votequorum information
-+----------------------
-+Expected votes:   10
-+Highest expected: 10
-+Total votes:      10
-+Quorum:           6
-+Flags:            Quorate Qdevice
-+
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+         1          3    A,V,MNW rh70-node1
-+         2          2    A,V,MNW rh70-node2 (local)
-+         3          1    A,V,MNW rh70-node3
-+         0          4            Qdevice
-+""")
-+        self.assertEqual(True, parsed["quorate"])
-+        self.assertEqual(6, parsed["quorum"])
-         self.assertEqual(
--            dom_get_child_elements(u)[1].getAttribute("name"),
--            "key2"
-+            [
-+                {"name": "rh70-node1", "votes": 3, "local": False},
-+                {"name": "rh70-node2", "votes": 2, "local": True},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            parsed["node_list"]
-         )
-         self.assertEqual(
--            dom_get_child_elements(u)[1].getAttribute("value"),
--            "val"
-+            [
-+                {"name": "Qdevice", "votes": 4, "local": False},
-+            ],
-+            parsed["qdevice_list"]
-         )
- 
--    def test_dom_update_meta_attr_update_remove(self):
--        el = xml.dom.minidom.parseString("""
--        <resource id="test_id">
--            <meta_attributes id="test_id-utilization">
--                <nvpair id="test_id-meta_attributes-key" name="key" value="test"/>
--                <nvpair id="test_id-meta_attributes-key2" name="key2" value="val"/>
--            </meta_attributes>
--        </resource>
--        """).documentElement
--        utils.dom_update_meta_attr(
--            el, [("key", "another_val"), ("key2", "")]
--        )
-+    def test_quorate_with_qdevice_lost(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:28 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            3
-+Node ID:          1
-+Ring ID:          19860
-+Quorate:          Yes
- 
--        u = dom_get_child_elements(el)[0]
--        self.assertEqual(len(dom_get_child_elements(u)), 1)
--        self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("id"),
--            "test_id-meta_attributes-key"
--        )
-+Votequorum information
-+----------------------
-+Expected votes:   10
-+Highest expected: 10
-+Total votes:      6
-+Quorum:           6
-+Flags:            Quorate Qdevice
-+
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+         1          3   NA,V,MNW rh70-node1
-+         2          2   NA,V,MNW rh70-node2 (local)
-+         3          1   NA,V,MNW rh70-node3
-+         0          0            Qdevice (votes 4)
-+""")
-+        self.assertEqual(True, parsed["quorate"])
-+        self.assertEqual(6, parsed["quorum"])
-         self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("name"),
--            "key"
-+            [
-+                {"name": "rh70-node1", "votes": 3, "local": False},
-+                {"name": "rh70-node2", "votes": 2, "local": True},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            parsed["node_list"]
-         )
-         self.assertEqual(
--            dom_get_child_elements(u)[0].getAttribute("value"),
--            "another_val"
-+            [
-+                {"name": "Qdevice", "votes": 0, "local": False},
-+            ],
-+            parsed["qdevice_list"]
-         )
- 
--    def test_get_utilization(self):
--        el = xml.dom.minidom.parseString("""
--        <resource id="test_id">
--            <utilization id="test_id-utilization">
--                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
--                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
--            </utilization>
--        </resource>
--        """).documentElement
--        self.assertEqual({"key": "-1", "keys": "90"}, utils.get_utilization(el))
--
--    def test_get_utilization_str(self):
--        el = xml.dom.minidom.parseString("""
--        <resource id="test_id">
--            <utilization id="test_id-utilization">
--                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
--                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
--            </utilization>
--        </resource>
--        """).documentElement
--        self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el))
--
--    def test_get_cluster_property_from_xml_enum(self):
--        el = ET.fromstring("""
--        <parameter name="no-quorum-policy" unique="0">
--            <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc>
--            <content type="enum" default="stop"/>
--            <longdesc lang="en">What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide</longdesc>
--        </parameter>
--        """)
--        expected = {
--            "name": "no-quorum-policy",
--            "shortdesc": "What to do when the cluster does not have quorum",
--            "longdesc": "",
--            "type": "enum",
--            "default": "stop",
--            "enum": ["stop", "freeze", "ignore", "suicide"]
--        }
--        self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
--
--    def test_get_cluster_property_from_xml(self):
--        el = ET.fromstring("""
--        <parameter name="default-resource-stickiness" unique="0">
--            <shortdesc lang="en"></shortdesc>
--            <content type="integer" default="0"/>
--            <longdesc lang="en"></longdesc>
--        </parameter>
--        """)
--        expected = {
--            "name": "default-resource-stickiness",
--            "shortdesc": "",
--            "longdesc": "",
--            "type": "integer",
--            "default": "0"
--        }
--        self.assertEqual(expected, utils.get_cluster_property_from_xml(el))
--
--    def test_get_cluster_property_default(self):
--        definition = {
--            "default-resource-stickiness": {
--                "name": "default-resource-stickiness",
--                "shortdesc": "",
--                "longdesc": "",
--                "type": "integer",
--                "default": "0",
--                "source": "pengine"
--            },
--            "no-quorum-policy": {
--                "name": "no-quorum-policy",
--                "shortdesc": "What to do when the cluster does not have quorum",
--                "longdesc": "What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide",
--                "type": "enum",
--                "default": "stop",
--                "enum": ["stop", "freeze", "ignore", "suicide"],
--                "source": "pengine"
--            },
--            "enable-acl": {
--                "name": "enable-acl",
--                "shortdesc": "Enable CIB ACL",
--                "longdesc": "Enable CIB ACL",
--                "type": "boolean",
--                "default": "false",
--                "source": "cib"
--            }
--        }
-+    def test_no_quorate_no_qdevice(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:35 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            1
-+Node ID:          1
-+Ring ID:          19868
-+Quorate:          No
-+
-+Votequorum information
-+----------------------
-+Expected votes:   3
-+Highest expected: 3
-+Total votes:      1
-+Quorum:           2 Activity blocked
-+Flags:            
-+
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+             1          1         NR rh70-node1 (local)
-+""")
-+        self.assertEqual(False, parsed["quorate"])
-+        self.assertEqual(2, parsed["quorum"])
-         self.assertEqual(
--            utils.get_cluster_property_default(
--                definition, "default-resource-stickiness"
--            ),
--            "0"
-+            [
-+                {"name": "rh70-node1", "votes": 1, "local": True},
-+            ],
-+            parsed["node_list"]
-         )
-+        self.assertEqual([], parsed["qdevice_list"])
-+
-+    def test_no_quorate_with_qdevice(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:35 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            1
-+Node ID:          1
-+Ring ID:          19868
-+Quorate:          No
-+
-+Votequorum information
-+----------------------
-+Expected votes:   3
-+Highest expected: 3
-+Total votes:      1
-+Quorum:           2 Activity blocked
-+Flags:            Qdevice
-+
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+         1          1         NR rh70-node1 (local)
-+         0          0            Qdevice (votes 1)
-+""")
-+        self.assertEqual(False, parsed["quorate"])
-+        self.assertEqual(2, parsed["quorum"])
-         self.assertEqual(
--            utils.get_cluster_property_default(definition, "no-quorum-policy"),
--            "stop"
-+            [
-+                {"name": "rh70-node1", "votes": 1, "local": True},
-+            ],
-+            parsed["node_list"]
-         )
-         self.assertEqual(
--            utils.get_cluster_property_default(definition, "enable-acl"),
--            "false"
--        )
--        self.assertRaises(
--            utils.UnknownPropertyException,
--            utils.get_cluster_property_default, definition, "non-existing"
-+            [
-+                {"name": "Qdevice", "votes": 0, "local": False},
-+            ],
-+            parsed["qdevice_list"]
-         )
- 
--    def test_is_valid_cib_value_unknown_type(self):
--        # should be always true
--        self.assertTrue(utils.is_valid_cib_value("unknown", "test"))
--        self.assertTrue(utils.is_valid_cib_value("string", "string value"))
-+    def test_error_missing_quorum(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:28 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            3
-+Node ID:          1
-+Ring ID:          19860
-+Quorate:          Yes
- 
--    def test_is_valid_cib_value_integer(self):
--        self.assertTrue(utils.is_valid_cib_value("integer", "0"))
--        self.assertTrue(utils.is_valid_cib_value("integer", "42"))
--        self.assertTrue(utils.is_valid_cib_value("integer", "-90"))
--        self.assertTrue(utils.is_valid_cib_value("integer", "+90"))
--        self.assertTrue(utils.is_valid_cib_value("integer", "INFINITY"))
--        self.assertTrue(utils.is_valid_cib_value("integer", "-INFINITY"))
--        self.assertTrue(utils.is_valid_cib_value("integer", "+INFINITY"))
--        self.assertFalse(utils.is_valid_cib_value("integer", "0.0"))
--        self.assertFalse(utils.is_valid_cib_value("integer", "-10.9"))
--        self.assertFalse(utils.is_valid_cib_value("integer", "string"))
-+Votequorum information
-+----------------------
-+Expected votes:   3
-+Highest expected: 3
-+Total votes:      3
-+Quorum:           
-+Flags:            Quorate
- 
--    def test_is_valid_cib_value_enum(self):
--        self.assertTrue(
--            utils.is_valid_cib_value("enum", "this", ["another", "this", "1"])
--        )
--        self.assertFalse(
--            utils.is_valid_cib_value("enum", "this", ["another", "this_not"])
--        )
--        self.assertFalse(utils.is_valid_cib_value("enum", "this", []))
--        self.assertFalse(utils.is_valid_cib_value("enum", "this"))
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+         1          1         NR rh70-node1 (local)
-+         2          1         NR rh70-node2
-+         3          1         NR rh70-node3
-+""")
-+        self.assertEqual(None, parsed)
- 
--    def test_is_valid_cib_value_boolean(self):
--        self.assertTrue(utils.is_valid_cib_value("boolean", "true"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "TrUe"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "TRUE"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "yes"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "on"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "y"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "Y"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "1"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "false"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "FaLse"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "FALSE"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "off"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "no"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "N"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "n"))
--        self.assertTrue(utils.is_valid_cib_value("boolean", "0"))
--        self.assertFalse(utils.is_valid_cib_value("boolean", "-1"))
--        self.assertFalse(utils.is_valid_cib_value("boolean", "not"))
--        self.assertFalse(utils.is_valid_cib_value("boolean", "random_string"))
--        self.assertFalse(utils.is_valid_cib_value("boolean", "truth"))
-+    def test_error_quorum_garbage(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:28 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            3
-+Node ID:          1
-+Ring ID:          19860
-+Quorate:          Yes
- 
--    def test_is_valid_cib_value_time(self):
--        self.assertTrue(utils.is_valid_cib_value("time", "10"))
--        self.assertTrue(utils.is_valid_cib_value("time", "0"))
--        self.assertTrue(utils.is_valid_cib_value("time", "9s"))
--        self.assertTrue(utils.is_valid_cib_value("time", "10sec"))
--        self.assertTrue(utils.is_valid_cib_value("time", "10min"))
--        self.assertTrue(utils.is_valid_cib_value("time", "10m"))
--        self.assertTrue(utils.is_valid_cib_value("time", "10h"))
--        self.assertTrue(utils.is_valid_cib_value("time", "10hr"))
--        self.assertFalse(utils.is_valid_cib_value("time", "5.2"))
--        self.assertFalse(utils.is_valid_cib_value("time", "-10"))
--        self.assertFalse(utils.is_valid_cib_value("time", "10m 2s"))
--        self.assertFalse(utils.is_valid_cib_value("time", "hour"))
--        self.assertFalse(utils.is_valid_cib_value("time", "day"))
-+Votequorum information
-+----------------------
-+Expected votes:   3
-+Highest expected: 3
-+Total votes:      3
-+Quorum:           Foo
-+Flags:            Quorate
- 
--    def test_validate_cluster_property(self):
--        definition = {
--            "default-resource-stickiness": {
--                "name": "default-resource-stickiness",
--                "shortdesc": "",
--                "longdesc": "",
--                "type": "integer",
--                "default": "0",
--                "source": "pengine"
--            },
--            "no-quorum-policy": {
--                "name": "no-quorum-policy",
--                "shortdesc": "What to do when the cluster does not have quorum",
--                "longdesc": "What to do when the cluster does not have quorum  Allowed values: stop, freeze, ignore, suicide",
--                "type": "enum",
--                "default": "stop",
--                "enum": ["stop", "freeze", "ignore", "suicide"],
--                "source": "pengine"
--            },
--            "enable-acl": {
--                "name": "enable-acl",
--                "shortdesc": "Enable CIB ACL",
--                "longdesc": "Enable CIB ACL",
--                "type": "boolean",
--                "default": "false",
--                "source": "cib"
--            }
--        }
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "default-resource-stickiness", "10"
--        ))
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "default-resource-stickiness", "-1"
--        ))
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "no-quorum-policy", "freeze"
--        ))
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "no-quorum-policy", "suicide"
--        ))
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "enable-acl", "true"
--        ))
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "enable-acl", "false"
--        ))
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "enable-acl", "on"
--        ))
--        self.assertTrue(utils.is_valid_cluster_property(
--            definition, "enable-acl", "OFF"
--        ))
--        self.assertFalse(utils.is_valid_cluster_property(
--            definition, "default-resource-stickiness", "test"
--        ))
--        self.assertFalse(utils.is_valid_cluster_property(
--            definition, "default-resource-stickiness", "1.2"
--        ))
--        self.assertFalse(utils.is_valid_cluster_property(
--            definition, "no-quorum-policy", "invalid"
--        ))
--        self.assertFalse(utils.is_valid_cluster_property(
--            definition, "enable-acl", "not"
--        ))
--        self.assertRaises(
--            utils.UnknownPropertyException,
--            utils.is_valid_cluster_property, definition, "unknown", "value"
--        )
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+         1          1         NR rh70-node1 (local)
-+         2          1         NR rh70-node2
-+         3          1         NR rh70-node3
-+""")
-+        self.assertEqual(None, parsed)
-+
-+    def test_error_node_votes_garbage(self):
-+        parsed = utils.parse_quorumtool_output("""\
-+Quorum information
-+------------------
-+Date:             Fri Jan 16 13:03:28 2015
-+Quorum provider:  corosync_votequorum
-+Nodes:            3
-+Node ID:          1
-+Ring ID:          19860
-+Quorate:          Yes
-+
-+Votequorum information
-+----------------------
-+Expected votes:   3
-+Highest expected: 3
-+Total votes:      3
-+Quorum:           2
-+Flags:            Quorate
- 
--    def assert_element_id(self, node, node_id):
--        self.assertTrue(
--            isinstance(node, xml.dom.minidom.Element),
--            "element with id '%s' not found" % node_id
-+Membership information
-+----------------------
-+    Nodeid      Votes    Qdevice Name
-+         1          1         NR rh70-node1 (local)
-+         2        foo         NR rh70-node2
-+         3          1         NR rh70-node3
-+""")
-+        self.assertEqual(None, parsed)
-+
-+
-+class IsNodeStopCauseQuorumLossTest(unittest.TestCase):
-+    def test_not_quorate(self):
-+        quorum_info = {
-+            "quorate": False,
-+        }
-+        self.assertEqual(
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-         )
--        self.assertEqual(node.getAttribute("id"), node_id)
- 
--class RunParallelTest(unittest.TestCase):
--    def fixture_create_worker(self, log, name, sleepSeconds=0):
--        def worker():
--            sleep(sleepSeconds)
--            log.append(name)
--        return worker
-+    def test_local_node_not_in_list(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 1,
-+            "node_list": [
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [],
-+        }
-+        self.assertEqual(
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+        )
- 
--    def test_run_all_workers(self):
--        log = []
--        utils.run_parallel(
--            [
--                self.fixture_create_worker(log, 'first'),
--                self.fixture_create_worker(log, 'second'),
-+    def test_local_node_alone_in_list(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 1,
-+            "node_list": [
-+                {"name": "rh70-node3", "votes": 1, "local": True},
-             ],
--            wait_seconds=.1
-+            "qdevice_list": [],
-+        }
-+        self.assertEqual(
-+            True,
-+            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-         )
- 
--        self.assertEqual(log, ['first', 'second'])
-+    def test_local_node_still_quorate(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 3, "local": False},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": True},
-+            ],
-+            "qdevice_list": [],
-+        }
-+        self.assertEqual(
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+        )
- 
--    def test_wait_for_slower_workers(self):
--        log = []
--        utils.run_parallel(
--            [
--                self.fixture_create_worker(log, 'first', .03),
--                self.fixture_create_worker(log, 'second'),
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 3, "local": False},
-+                {"name": "rh70-node2", "votes": 2, "local": True},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-             ],
--            wait_seconds=.01
-+            "qdevice_list": [],
-+        }
-+        self.assertEqual(
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-         )
- 
--        self.assertEqual(log, ['second', 'first'])
-+    def test_local_node_quorum_loss(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 3, "local": True},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [],
-+        }
-+        self.assertEqual(
-+            True,
-+            utils.is_node_stop_cause_quorum_loss(quorum_info, True)
-+        )
- 
--class PrepareNodeNamesTest(unittest.TestCase):
--    def test_return_original_when_is_in_pacemaker_nodes(self):
--        node = 'test'
-+    def test_one_node_still_quorate(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 3, "local": True},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [],
-+        }
-         self.assertEqual(
--            node,
--            utils.prepare_node_name(node, {1: node}, {})
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node3"]
-+            )
-         )
- 
--    def test_return_original_when_is_not_in_corosync_nodes(self):
--        node = 'test'
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 3, "local": True},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [],
-+        }
-         self.assertEqual(
--            node,
--            utils.prepare_node_name(node, {}, {})
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node2"]
-+            )
-         )
- 
--    def test_return_original_when_corosync_id_not_in_pacemaker(self):
--        node = 'test'
-+    def test_one_node_quorum_loss(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 3, "local": True},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [],
-+        }
-         self.assertEqual(
--            node,
--            utils.prepare_node_name(node, {}, {1: node})
-+            True,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node1"]
-+            )
-         )
- 
--    def test_return_modified_name(self):
--        node = 'test'
-+    def test_more_nodes_still_quorate(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 4, "local": True},
-+                {"name": "rh70-node2", "votes": 1, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [],
-+        }
-         self.assertEqual(
--            'another (test)',
--            utils.prepare_node_name(node, {1: 'another'}, {1: node})
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node2", "rh70-node3"]
-+            )
-         )
- 
--    def test_return_modified_name_with_pm_null_case(self):
--        node = 'test'
-+    def test_more_nodes_quorum_loss(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 3, "local": True},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [],
-+        }
-         self.assertEqual(
--            '*Unknown* (test)',
--            utils.prepare_node_name(node, {1: '(null)'}, {1: node})
-+            True,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node2", "rh70-node3"]
-+            )
-         )
- 
--class NodeActionTaskTest(unittest.TestCase):
--    def test_can_run_action(self):
--        def action(node, arg, kwarg=None):
--            return (0, ':'.join([node, arg, kwarg]))
-+    def test_qdevice_still_quorate(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 3,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 1, "local": True},
-+                {"name": "rh70-node2", "votes": 1, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [
-+                {"name": "Qdevice", "votes": 1, "local": False},
-+            ],
-+        }
-+        self.assertEqual(
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node2"]
-+            )
-+        )
- 
--        report_list = []
--        def report(node, returncode, output):
--            report_list.append('|'.join([node, str(returncode), output]))
-+    def test_qdevice_quorum_lost(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 3,
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 1, "local": True},
-+                {"name": "rh70-node2", "votes": 1, "local": False},
-+                {"name": "rh70-node3", "votes": 1, "local": False},
-+            ],
-+            "qdevice_list": [
-+                {"name": "Qdevice", "votes": 1, "local": False},
-+            ],
-+        }
-+        self.assertEqual(
-+            True,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node2", "rh70-node3"]
-+            )
-+        )
- 
--        task = utils.create_task(report, action, 'node', 'arg', kwarg='kwarg')
--        task()
-+    def test_qdevice_lost_still_quorate(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4, # expect qdevice votes == 1
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 2, "local": True},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 2, "local": False},
-+            ],
-+            "qdevice_list": [
-+                {"name": "Qdevice", "votes": 0, "local": False},
-+            ],
-+        }
-+        self.assertEqual(
-+            False,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node2"]
-+            )
-+        )
- 
--        self.assertEqual(['node|0|node:arg:kwarg'], report_list)
-+    def test_qdevice_lost_quorum_lost(self):
-+        quorum_info = {
-+            "quorate": True,
-+            "quorum": 4, # expect qdevice votes == 1
-+            "node_list": [
-+                {"name": "rh70-node1", "votes": 2, "local": True},
-+                {"name": "rh70-node2", "votes": 2, "local": False},
-+                {"name": "rh70-node3", "votes": 2, "local": False},
-+            ],
-+            "qdevice_list": [
-+                {"name": "Qdevice", "votes": 0, "local": False},
-+            ],
-+        }
-+        self.assertEqual(
-+            True,
-+            utils.is_node_stop_cause_quorum_loss(
-+                quorum_info, False, ["rh70-node2", "rh70-node3"]
-+            )
-+        )
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 8ae6839..42e03e6 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1272,14 +1272,20 @@ Commands:
- def qdevice(args=[], pout=True):
-     output = """
- Usage: pcs qdevice <command>
--Manage quorum device provider on the local host
-+Manage quorum device provider on the local host, currently only 'net' model is
-+supported.
- 
- Commands:
-+    status <device model> [--full] [<cluster name>]
-+        Show runtime status of specified model of quorum device provider.  Using
-+        --full will give more detailed output.  If <cluster name> is specified,
-+        only information about the specified cluster will be displayed.
-+
-     setup model <device model> [--enable] [--start]
-         Configure specified model of quorum device provider.  Quorum device then
--        may be added to clusters by "pcs quorum device add" command.
--        --start will also start the provider.  --enable will configure
--        the provider to start on boot.
-+        can be added to clusters by running "pcs quorum device add" command
-+        in a cluster.  --start will also start the provider.  --enable will
-+        configure the provider to start on boot.
- 
-     destroy <device model>
-         Disable and stop specified model of quorum device provider and delete
-@@ -1292,8 +1298,10 @@ Commands:
-         Stop specified model of quorum device provider.
- 
-     kill <device model>
--        Force specified model of quorum device provider to stop (performs
--        kill -9).
-+        Force specified model of quorum device provider to stop (performs kill
-+        -9).  Note that init system (e.g. systemd) can detect that the qdevice
-+        is not running and start it again.  If you want to stop the qdevice, run
-+        "pcs qdevice stop" command.
- 
-     enable <device model>
-         Configure specified model of quorum device provider to start on boot.
-@@ -1310,21 +1318,38 @@ Commands:
- def quorum(args=[], pout=True):
-     output = """
- Usage: pcs quorum <command>
--Manage cluster quorum settings
-+Manage cluster quorum settings.
- 
- Commands:
-     config
-         Show quorum configuration.
- 
--    device add [generic options] model <device model> [model options]
--        Add quorum device to cluster.  Quorum device needs to be created first
--        by "pcs qdevice setup" command.
-+    status
-+        Show quorum runtime status.
-+
-+    device add [<generic options>] model <device model> [<model options>]
-+        Add a quorum device to the cluster.  Quorum device needs to be created
-+        first by "pcs qdevice setup" command.  It is not possible to use more
-+        than one quorum device in a cluster simultaneously.  Generic options,
-+        model and model options are all documented in corosync's
-+        corosync-qdevice(8) man page.
- 
-     device remove
--        Remove quorum device from cluster.
-+        Remove a quorum device from the cluster.
-+
-+    device status [--full]
-+        Show quorum device runtime status.  Using --full will give more detailed
-+        output.
-+
-+    device update [<generic options>] [model <model options>]
-+        Add/Change quorum device options.  Generic options and model options are
-+        all documented in corosync's corosync-qdevice(8) man page.  Requires
-+        the cluster to be stopped.
- 
--    device update [generic options] [model <model options>]
--        Add/Change quorum device options.  Requires cluster to be stopped.
-+        WARNING: If you want to change "host" option of qdevice model net, use
-+        "pcs quorum device remove" and "pcs quorum device add" commands
-+        to set up configuration properly unless old and new host is the same
-+        machine.
- 
-     unblock [--force]
-         Cancel waiting for all nodes when establishing quorum.  Useful in
-@@ -1343,7 +1368,7 @@ Commands:
-             [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
-         Add/Change quorum options.  At least one option must be specified.
-         Options are documented in corosync's votequorum(5) man page.  Requires
--        cluster to be stopped.
-+        the cluster to be stopped.
- """
-     if pout:
-         print(sub_usage(args, output))
-diff --git a/pcs/utils.py b/pcs/utils.py
-index f9cdb1c..171fbdd 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -56,7 +56,6 @@ except ImportError:
- 
- 
- from pcs import settings, usage
--from pcs.common import report_codes
- from pcs.cli.common.reports import (
-     process_library_reports,
-     LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
-@@ -64,18 +63,21 @@ from pcs.cli.common.reports import (
- from pcs.common.tools import simple_cache
- from pcs.lib import reports
- from pcs.lib.env import LibraryEnvironment
--from pcs.lib.errors import LibraryError, ReportItemSeverity
--import pcs.lib.corosync.config_parser as corosync_conf_parser
-+from pcs.lib.errors import LibraryError
- from pcs.lib.external import (
--    is_cman_cluster,
-     CommandRunner,
--    is_service_running,
--    is_service_enabled,
-+    is_cman_cluster,
-     is_systemctl,
-+    is_service_enabled,
-+    is_service_running,
-+    disable_service,
-+    DisableServiceError,
-+    enable_service,
-+    EnableServiceError,
- )
- import pcs.lib.resource_agent as lib_ra
-+import pcs.lib.corosync.config_parser as corosync_conf_parser
- from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
--from pcs.lib.nodes_task import check_corosync_offline_on_nodes
- from pcs.lib.pacemaker import has_resource_wait_support
- from pcs.lib.pacemaker_state import ClusterState
- from pcs.lib.pacemaker_values import(
-@@ -686,50 +688,18 @@ def autoset_2node_corosync(corosync_conf):
-     facade._ConfigFacade__update_two_node()
-     return facade.config
- 
--# when adding or removing a node, changing number of nodes to or from two,
--# we need to change qdevice algorith lms <-> 2nodelms, which cannot be done when
--# the cluster is running
--def check_qdevice_algorithm_and_running_cluster(corosync_conf, add=True):
-+# is it needed to handle corosync-qdevice service when managing cluster services
-+def need_to_handle_qdevice_service():
-     if is_rhel6():
--        return
--    facade = corosync_conf_facade.from_string(corosync_conf)
--    if not facade.has_quorum_device():
--        return
--    node_list = facade.get_nodes()
--    node_count_target = len(node_list) + (1 if add else -1)
--    model, model_opts, dummy_generic_opts = facade.get_quorum_device_settings()
--    if model != "net":
--        return
--    algorithm = model_opts.get("algorithm", "")
--    need_stopped = (
--        (algorithm == "lms" and node_count_target == 2)
--        or
--        (algorithm == "2nodelms" and node_count_target != 2)
--    )
--    if not need_stopped:
--        return
--
-+        return False
-     try:
--        lib_env = get_lib_env()
--        check_corosync_offline_on_nodes(
--            lib_env.node_communicator(),
--            lib_env.report_processor,
--            node_list,
--            get_modificators()["skip_offline_nodes"]
-+        cfg = corosync_conf_facade.from_string(
-+            open(settings.corosync_conf_file).read()
-         )
--    except LibraryError as e:
--        report_item_list = list(e.args)
--        for report_item in report_item_list:
--            if (
--                report_item.code == report_codes.COROSYNC_RUNNING_ON_NODE
--                and
--                report_item.severity == ReportItemSeverity.ERROR
--            ):
--                report_item_list.append(
--                    reports.qdevice_remove_or_cluster_stop_needed()
--                )
--                break
--        process_library_reports(report_item_list)
-+        return cfg.has_quorum_device()
-+    except (EnvironmentError, corosync_conf_parser.CorosyncConfParserException):
-+        # corosync.conf not present or not valid => no qdevice specified
-+        return False
- 
- def getNextNodeID(corosync_conf):
-     currentNodes = []
-@@ -2070,28 +2040,43 @@ def serviceStatus(prefix):
-         pass
- 
- def enableServices():
-+    # do NOT handle SBD in here, it is started by pacemaker not systemd or init
-     if is_rhel6():
--        run(["chkconfig", "pacemaker", "on"])
-+        service_list = ["pacemaker"]
-     else:
--        if is_systemctl():
--            run(["systemctl", "enable", "corosync.service"])
--            run(["systemctl", "enable", "pacemaker.service"])
--        else:
--            run(["chkconfig", "corosync", "on"])
--            run(["chkconfig", "pacemaker", "on"])
-+        service_list = ["corosync", "pacemaker"]
-+        if need_to_handle_qdevice_service():
-+            service_list.append("corosync-qdevice")
-+
-+    report_item_list = []
-+    for service in service_list:
-+        try:
-+            enable_service(cmd_runner(), service)
-+        except EnableServiceError as e:
-+            report_item_list.append(
-+                reports.service_enable_error(e.service, e.message)
-+            )
-+    if report_item_list:
-+        raise LibraryError(*report_item_list)
- 
- def disableServices():
--    if is_rhel6():
--        run(["chkconfig", "pacemaker", "off"])
--        run(["chkconfig", "corosync", "off"]) # Left here for users of old pcs
--                                              # which enabled corosync
--    else:
--        if is_systemctl():
--            run(["systemctl", "disable", "corosync.service"])
--            run(["systemctl", "disable", "pacemaker.service"])
--        else:
--            run(["chkconfig", "corosync", "off"])
--            run(["chkconfig", "pacemaker", "off"])
-+    # Disable corosync on RHEL6 as well - left here for users of old pcs which
-+    # enabled corosync.
-+    # do NOT handle SBD in here, it is started by pacemaker not systemd or init
-+    service_list = ["corosync", "pacemaker"]
-+    if need_to_handle_qdevice_service():
-+        service_list.append("corosync-qdevice")
-+
-+    report_item_list = []
-+    for service in service_list:
-+        try:
-+            disable_service(cmd_runner(), service)
-+        except DisableServiceError as e:
-+            report_item_list.append(
-+                reports.service_disable_error(e.service, e.message)
-+            )
-+    if report_item_list:
-+        raise LibraryError(*report_item_list)
- 
- def write_file(path, data, permissions=0o644, binary=False):
-     if os.path.exists(path):
-@@ -2248,7 +2233,7 @@ def parse_cman_quorum_info(cman_info):
-     in_node_list = False
-     local_node_id = ""
-     try:
--        for line in cman_info.split("\n"):
-+        for line in cman_info.splitlines():
-             line = line.strip()
-             if not line:
-                 continue
-@@ -2260,12 +2245,13 @@ def parse_cman_quorum_info(cman_info):
-                 parsed["node_list"].append({
-                     "name": parts[3],
-                     "votes": int(parts[2]),
--                    "local": local_node_id == parts[0]
-+                    "local": local_node_id == parts[0],
-                 })
-             else:
-                 if line == "---Votes---":
-                     in_node_list = True
-                     parsed["node_list"] = []
-+                    parsed["qdevice_list"] = []
-                     continue
-                 if not ":" in line:
-                     continue
-@@ -2290,7 +2276,7 @@ def parse_quorumtool_output(quorumtool_output):
-     parsed = {}
-     in_node_list = False
-     try:
--        for line in quorumtool_output.split("\n"):
-+        for line in quorumtool_output.splitlines():
-             line = line.strip()
-             if not line:
-                 continue
-@@ -2299,15 +2285,25 @@ def parse_quorumtool_output(quorumtool_output):
-                     # skip headers
-                     continue
-                 parts = line.split()
--                parsed["node_list"].append({
--                    "name": parts[3],
--                    "votes": int(parts[1]),
--                    "local": len(parts) > 4 and parts[4] == "(local)"
--                })
-+                if parts[0] == "0":
-+                    # this line has nodeid == 0, this is a qdevice line
-+                    parsed["qdevice_list"].append({
-+                        "name": parts[2],
-+                        "votes": int(parts[1]),
-+                        "local": False,
-+                    })
-+                else:
-+                    # this line has non-zero nodeid, this is a node line
-+                    parsed["node_list"].append({
-+                        "name": parts[3],
-+                        "votes": int(parts[1]),
-+                        "local": len(parts) > 4 and parts[4] == "(local)",
-+                    })
-             else:
-                 if line == "Membership information":
-                     in_node_list = True
-                     parsed["node_list"] = []
-+                    parsed["qdevice_list"] = []
-                     continue
-                 if not ":" in line:
-                     continue
-@@ -2340,6 +2336,8 @@ def is_node_stop_cause_quorum_loss(quorum_info, local=True, node_list=None):
-         if node_list and node_info["name"] in node_list:
-             continue
-         votes_after_stop += node_info["votes"]
-+    for qdevice_info in quorum_info.get("qdevice_list", []):
-+        votes_after_stop += qdevice_info["votes"]
-     return votes_after_stop < quorum_info["quorum"]
- 
- def dom_prepare_child_element(dom_element, tag_name, id):
-@@ -2661,6 +2659,7 @@ def get_modificators():
-         "enable": "--enable" in pcs_options,
-         "force": "--force" in pcs_options,
-         "full": "--full" in pcs_options,
-+        "name": pcs_options.get("--name", None),
-         "skip_offline_nodes": "--skip-offline" in pcs_options,
-         "start": "--start" in pcs_options,
-         "watchdog": pcs_options.get("--watchdog", []),
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 415e02a..7c25e10 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -1965,6 +1965,23 @@ def disable_service(service)
-   return (retcode == 0)
- end
- 
-+def start_service(service)
-+  _, _, retcode = run_cmd(
-+    PCSAuth.getSuperuserAuth(), "service", service, "start"
-+  )
-+  return (retcode == 0)
-+end
-+
-+def stop_service(service)
-+  if not is_service_installed?(service)
-+    return true
-+  end
-+  _, _, retcode = run_cmd(
-+    PCSAuth.getSuperuserAuth(), "service", service, "stop"
-+  )
-+  return (retcode == 0)
-+end
-+
- def set_cluster_prop_force(auth_user, prop, val)
-   cmd = [PCS, 'property', 'set', "#{prop}=#{val}", '--force']
-   if pacemaker_running?
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index f002d5b..0b2dc61 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -4,6 +4,7 @@ require 'open4'
- require 'set'
- require 'timeout'
- require 'rexml/document'
-+require 'base64'
- 
- require 'pcs.rb'
- require 'resource.rb'
-@@ -71,7 +72,16 @@ def remote(params, request, auth_user)
-       :remove_stonith_watchdog_timeout=> method(:remove_stonith_watchdog_timeout),
-       :set_stonith_watchdog_timeout_to_zero => method(:set_stonith_watchdog_timeout_to_zero),
-       :remote_enable_sbd => method(:remote_enable_sbd),
--      :remote_disable_sbd => method(:remote_disable_sbd)
-+      :remote_disable_sbd => method(:remote_disable_sbd),
-+      :qdevice_net_get_ca_certificate => method(:qdevice_net_get_ca_certificate),
-+      :qdevice_net_sign_node_certificate => method(:qdevice_net_sign_node_certificate),
-+      :qdevice_net_client_init_certificate_storage => method(:qdevice_net_client_init_certificate_storage),
-+      :qdevice_net_client_import_certificate => method(:qdevice_net_client_import_certificate),
-+      :qdevice_net_client_destroy => method(:qdevice_net_client_destroy),
-+      :qdevice_client_enable => method(:qdevice_client_enable),
-+      :qdevice_client_disable => method(:qdevice_client_disable),
-+      :qdevice_client_start => method(:qdevice_client_start),
-+      :qdevice_client_stop => method(:qdevice_client_stop),
-   }
-   remote_cmd_with_pacemaker = {
-       :pacemaker_node_status => method(:remote_pacemaker_node_status),
-@@ -2377,3 +2387,154 @@ def remote_disable_sbd(params, request, auth_user)
- 
-   return [200, 'Sbd has been disabled.']
- end
-+
-+def qdevice_net_get_ca_certificate(params, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
-+    return 403, 'Permission denied'
-+  end
-+  begin
-+    return [
-+      200,
-+      Base64.encode64(File.read(COROSYNC_QDEVICE_NET_SERVER_CA_FILE))
-+    ]
-+  rescue => e
-+    return [400, "Unable to read certificate: #{e}"]
-+  end
-+end
-+
-+def qdevice_net_sign_node_certificate(params, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
-+    return 403, 'Permission denied'
-+  end
-+  stdout, stderr, retval = run_cmd_options(
-+    auth_user,
-+    {'stdin' => params[:certificate_request]},
-+    PCS, 'qdevice', 'sign-net-cert-request', '--name', params[:cluster_name]
-+  )
-+  if retval != 0
-+    return [400, stderr.join('')]
-+  end
-+  return [200, stdout.join('')]
-+end
-+
-+def qdevice_net_client_init_certificate_storage(params, request, auth_user)
-+  # Last step of adding qdevice into a cluster is distribution of corosync.conf
-+  # file with qdevice settings. This requires FULL permissions currently.
-+  # If that gets relaxed, we can require lower permissions in here as well.
-+  unless allowed_for_local_cluster(auth_user, Permissions::FULL)
-+    return 403, 'Permission denied'
-+  end
-+  stdout, stderr, retval = run_cmd_options(
-+    auth_user,
-+    {'stdin' => params[:ca_certificate]},
-+    PCS, 'qdevice', 'net-client', 'setup'
-+  )
-+  if retval != 0
-+    return [400, stderr.join('')]
-+  end
-+  return [200, stdout.join('')]
-+end
-+
-+def qdevice_net_client_import_certificate(params, request, auth_user)
-+  # Last step of adding qdevice into a cluster is distribution of corosync.conf
-+  # file with qdevice settings. This requires FULL permissions currently.
-+  # If that gets relaxed, we can require lower permissions in here as well.
-+  unless allowed_for_local_cluster(auth_user, Permissions::FULL)
-+    return 403, 'Permission denied'
-+  end
-+  stdout, stderr, retval = run_cmd_options(
-+    auth_user,
-+    {'stdin' => params[:certificate]},
-+    PCS, 'qdevice', 'net-client', 'import-certificate'
-+  )
-+  if retval != 0
-+    return [400, stderr.join('')]
-+  end
-+  return [200, stdout.join('')]
-+end
-+
-+def qdevice_net_client_destroy(param, request, auth_user)
-+  # When removing a qdevice from a cluster, an updated corosync.conf file
-+  # with removed qdevice settings is distributed. This requires FULL permissions
-+  # currently. If that gets relaxed, we can require lower permissions in here
-+  # as well.
-+  unless allowed_for_local_cluster(auth_user, Permissions::FULL)
-+    return 403, 'Permission denied'
-+  end
-+  stdout, stderr, retval = run_cmd(
-+    auth_user,
-+    PCS, 'qdevice', 'net-client', 'destroy'
-+  )
-+  if retval != 0
-+    return [400, stderr.join('')]
-+  end
-+  return [200, stdout.join('')]
-+end
-+
-+def qdevice_client_disable(param, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  if disable_service('corosync-qdevice')
-+    msg = 'corosync-qdevice disabled'
-+    $logger.info(msg)
-+    return [200, msg]
-+  else
-+    msg = 'Disabling corosync-qdevice failed'
-+    $logger.error(msg)
-+    return [400, msg]
-+  end
-+end
-+
-+def qdevice_client_enable(param, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  if not is_service_enabled?('corosync')
-+    msg = 'corosync is not enabled, skipping'
-+    $logger.info(msg)
-+    return [200, msg]
-+  elsif enable_service('corosync-qdevice')
-+    msg = 'corosync-qdevice enabled'
-+    $logger.info(msg)
-+    return [200, msg]
-+  else
-+    msg = 'Enabling corosync-qdevice failed'
-+    $logger.error(msg)
-+    return [400, msg]
-+  end
-+end
-+
-+def qdevice_client_stop(param, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  if stop_service('corosync-qdevice')
-+    msg = 'corosync-qdevice stopped'
-+    $logger.info(msg)
-+    return [200, msg]
-+  else
-+    msg = 'Stopping corosync-qdevice failed'
-+    $logger.error(msg)
-+    return [400, msg]
-+  end
-+end
-+
-+def qdevice_client_start(param, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  if not is_service_running?('corosync')
-+    msg = 'corosync is not running, skipping'
-+    $logger.info(msg)
-+    return [200, msg]
-+  elsif start_service('corosync-qdevice')
-+    msg = 'corosync-qdevice started'
-+    $logger.info(msg)
-+    return [200, msg]
-+  else
-+    msg = 'Starting corosync-qdevice failed'
-+    $logger.error(msg)
-+    return [400, msg]
-+  end
-+end
-diff --git a/pcsd/settings.rb b/pcsd/settings.rb
-index 6229161..51f00ac 100644
---- a/pcsd/settings.rb
-+++ b/pcsd/settings.rb
-@@ -21,6 +21,12 @@ CIBADMIN = "/usr/sbin/cibadmin"
- SBD_CONFIG = '/etc/sysconfig/sbd'
- CIB_PATH='/var/lib/pacemaker/cib/cib.xml'
- 
-+COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
-+COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
-+  COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt"
-+)
-+COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb"
-+
- SUPERUSER = 'hacluster'
- ADMIN_GROUP = 'haclient'
- $user_pass_file = "pcs_users.conf"
-diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
-index 7bc92a9..aae1b11 100644
---- a/pcsd/settings.rb.debian
-+++ b/pcsd/settings.rb.debian
-@@ -18,8 +18,14 @@ COROSYNC_BINARIES = "/usr/sbin/"
- CMAN_TOOL = "/usr/sbin/cman_tool"
- PACEMAKERD = "/usr/sbin/pacemakerd"
- CIBADMIN = "/usr/sbin/cibadmin"
--SBD_CONFIG = '/etc/sysconfig/sbd'
--CIB_PATH='/var/lib/pacemaker/cib/cib.xml'
-+SBD_CONFIG = "/etc/sysconfig/sbd"
-+CIB_PATH = "/var/lib/pacemaker/cib/cib.xml"
-+
-+COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
-+COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
-+  COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt"
-+)
-+COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb"
- 
- SUPERUSER = 'hacluster'
- ADMIN_GROUP = 'haclient'
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1158805-01-cli-improve-quorum-device-commands-syntax.patch b/SOURCES/bz1158805-01-cli-improve-quorum-device-commands-syntax.patch
deleted file mode 100644
index 9ad60bf..0000000
--- a/SOURCES/bz1158805-01-cli-improve-quorum-device-commands-syntax.patch
+++ /dev/null
@@ -1,298 +0,0 @@
-From 32d9dde2936b9f8b690ce3dd6c9bdc685f3ac5f0 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Mon, 11 Jul 2016 15:19:30 +0200
-Subject: [PATCH] cli: improve quorum device commands syntax
-
-* add alias "pcs status quorum" to "pcs quorum status"
-* add alias "pcs status qdevice" to "pcs qdevice status"
-* add alias "pcs quorum" to "pcs quorum config"
----
- pcs/cluster.py | 59 +++------------------------------------------------
- pcs/pcs.8      |  8 ++++++-
- pcs/quorum.py  | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++-----
- pcs/status.py  | 25 ++++++++++++++++++++++
- pcs/usage.py   | 10 ++++++++-
- 5 files changed, 106 insertions(+), 63 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 9d4798c..4155103 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -26,7 +26,7 @@ from pcs import (
-     constraint,
-     node,
-     pcsd,
--    prop,
-+    quorum,
-     resource,
-     settings,
-     status,
-@@ -143,9 +143,9 @@ def cluster_cmd(argv):
-         cluster_report(argv)
-     elif (sub_cmd == "quorum"):
-         if argv and argv[0] == "unblock":
--            cluster_quorum_unblock(argv[1:])
-+            quorum.quorum_unblock_cmd(argv[1:])
-         else:
--            usage.cluster(["quorum"])
-+            usage.cluster()
-             sys.exit(1)
-     else:
-         usage.cluster()
-@@ -1890,56 +1890,3 @@ def cluster_remote_node(argv):
-         usage.cluster(["remote-node"])
-         sys.exit(1)
- 
--def cluster_quorum_unblock(argv):
--    if len(argv) > 0:
--        usage.quorum(["unblock"])
--        sys.exit(1)
--
--    if utils.is_rhel6():
--        utils.err("operation is not supported on CMAN clusters")
--
--    output, retval = utils.run(
--        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
--    )
--    if retval != 0:
--        utils.err("unable to check quorum status")
--    if output.split("=")[-1].strip() != "1":
--        utils.err("cluster is not waiting for nodes to establish quorum")
--
--    unjoined_nodes = (
--        set(utils.getNodesFromCorosyncConf())
--        -
--        set(utils.getCorosyncActiveNodes())
--    )
--    if not unjoined_nodes:
--        utils.err("no unjoined nodes found")
--    if "--force" not in utils.pcs_options:
--        answer = utils.get_terminal_input(
--            (
--                "WARNING: If node(s) {nodes} are not powered off or they do"
--                + " have access to shared resources, data corruption and/or"
--                + " cluster failure may occur. Are you sure you want to"
--                + " continue? [y/N] "
--            ).format(nodes=", ".join(unjoined_nodes))
--        )
--        if answer.lower() not in ["y", "yes"]:
--            print("Canceled")
--            return
--    for node in unjoined_nodes:
--        stonith.stonith_confirm([node], skip_question=True)
--
--    output, retval = utils.run(
--        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
--    )
--    if retval != 0:
--        utils.err("unable to cancel waiting for nodes")
--    print("Quorum unblocked")
--
--    startup_fencing = prop.get_set_properties().get("startup-fencing", "")
--    utils.set_cib_property(
--        "startup-fencing",
--        "false" if startup_fencing.lower() != "false" else "true"
--    )
--    utils.set_cib_property("startup-fencing", startup_fencing)
--    print("Waiting for nodes canceled")
--
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 223ef1b..a26c94b 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -543,7 +543,7 @@ disable <device model>
- Configure specified model of quorum device provider to not start on boot.
- .SS "quorum"
- .TP
--config
-+[config]
- Show quorum configuration.
- .TP
- status
-@@ -590,6 +590,12 @@ View current cluster status.
- corosync
- View current membership information as seen by corosync.
- .TP
-+quorum
-+View current quorum status.
-+.TP
-+qdevice <device model> [\fB\-\-full\fR] [<cluster name>]
-+Show runtime status of specified model of quorum device provider.  Using \fB\-\-full\fR will give more detailed output.  If <cluster name> is specified, only information about the specified cluster will be displayed.
-+.TP
- nodes [corosync|both|config]
- View current status of nodes from pacemaker. If 'corosync' is specified, print nodes currently configured in corosync, if 'both' is specified, print nodes from both corosync & pacemaker.  If 'config' is specified, print nodes from corosync & pacemaker configuration.
- .TP
-diff --git a/pcs/quorum.py b/pcs/quorum.py
-index 2d54ed7..a849282 100644
---- a/pcs/quorum.py
-+++ b/pcs/quorum.py
-@@ -8,10 +8,11 @@ from __future__ import (
- import sys
- 
- from pcs import (
-+    prop,
-+    stonith,
-     usage,
-     utils,
- )
--from pcs.cluster import cluster_quorum_unblock
- from pcs.cli.common import parse_args
- from pcs.cli.common.console_report import indent
- from pcs.cli.common.errors import CmdLineInputError
-@@ -19,10 +20,10 @@ from pcs.lib.errors import LibraryError
- 
- def quorum_cmd(lib, argv, modificators):
-     if len(argv) < 1:
--        usage.quorum()
--        sys.exit(1)
-+        sub_cmd, argv_next = "config", []
-+    else:
-+        sub_cmd, argv_next = argv[0], argv[1:]
- 
--    sub_cmd, argv_next = argv[0], argv[1:]
-     try:
-         if sub_cmd == "help":
-             usage.quorum(argv)
-@@ -35,7 +36,8 @@ def quorum_cmd(lib, argv, modificators):
-         elif sub_cmd == "device":
-             quorum_device_cmd(lib, argv_next, modificators)
-         elif sub_cmd == "unblock":
--            cluster_quorum_unblock(argv_next)
-+            # TODO switch to new architecture
-+            quorum_unblock_cmd(argv_next)
-         elif sub_cmd == "update":
-             quorum_update_cmd(lib, argv_next, modificators)
-         else:
-@@ -185,3 +187,58 @@ def quorum_device_update_cmd(lib, argv, modificators):
-         force_options=modificators["force"],
-         skip_offline_nodes=modificators["skip_offline_nodes"]
-     )
-+
-+# TODO switch to new architecture, move to lib
-+def quorum_unblock_cmd(argv):
-+    if len(argv) > 0:
-+        usage.quorum(["unblock"])
-+        sys.exit(1)
-+
-+    if utils.is_rhel6():
-+        utils.err("operation is not supported on CMAN clusters")
-+
-+    output, retval = utils.run(
-+        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
-+    )
-+    if retval != 0:
-+        utils.err("unable to check quorum status")
-+    if output.split("=")[-1].strip() != "1":
-+        utils.err("cluster is not waiting for nodes to establish quorum")
-+
-+    unjoined_nodes = (
-+        set(utils.getNodesFromCorosyncConf())
-+        -
-+        set(utils.getCorosyncActiveNodes())
-+    )
-+    if not unjoined_nodes:
-+        utils.err("no unjoined nodes found")
-+    if "--force" not in utils.pcs_options:
-+        answer = utils.get_terminal_input(
-+            (
-+                "WARNING: If node(s) {nodes} are not powered off or they do"
-+                + " have access to shared resources, data corruption and/or"
-+                + " cluster failure may occur. Are you sure you want to"
-+                + " continue? [y/N] "
-+            ).format(nodes=", ".join(unjoined_nodes))
-+        )
-+        if answer.lower() not in ["y", "yes"]:
-+            print("Canceled")
-+            return
-+    for node in unjoined_nodes:
-+        stonith.stonith_confirm([node], skip_question=True)
-+
-+    output, retval = utils.run(
-+        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
-+    )
-+    if retval != 0:
-+        utils.err("unable to cancel waiting for nodes")
-+    print("Quorum unblocked")
-+
-+    startup_fencing = prop.get_set_properties().get("startup-fencing", "")
-+    utils.set_cib_property(
-+        "startup-fencing",
-+        "false" if startup_fencing.lower() != "false" else "true"
-+    )
-+    utils.set_cib_property("startup-fencing", startup_fencing)
-+    print("Waiting for nodes canceled")
-+
-diff --git a/pcs/status.py b/pcs/status.py
-index e1f367f..bdfcc85 100644
---- a/pcs/status.py
-+++ b/pcs/status.py
-@@ -13,6 +13,9 @@ from pcs import (
-     usage,
-     utils,
- )
-+from pcs.qdevice import qdevice_status_cmd
-+from pcs.quorum import quorum_status_cmd
-+from pcs.cli.common.errors import CmdLineInputError
- from pcs.lib.errors import LibraryError
- from pcs.lib.pacemaker_state import ClusterState
- 
-@@ -38,6 +41,28 @@ def status_cmd(argv):
-         xml_status()
-     elif (sub_cmd == "corosync"):
-         corosync_status()
-+    elif sub_cmd == "qdevice":
-+        try:
-+            qdevice_status_cmd(
-+                utils.get_library_wrapper(),
-+                argv,
-+                utils.get_modificators()
-+            )
-+        except LibraryError as e:
-+            utils.process_library_reports(e.args)
-+        except CmdLineInputError as e:
-+            utils.exit_on_cmdline_input_errror(e, "status", sub_cmd)
-+    elif sub_cmd == "quorum":
-+        try:
-+            quorum_status_cmd(
-+                utils.get_library_wrapper(),
-+                argv,
-+                utils.get_modificators()
-+            )
-+        except LibraryError as e:
-+            utils.process_library_reports(e.args)
-+        except CmdLineInputError as e:
-+            utils.exit_on_cmdline_input_errror(e, "status", sub_cmd)
-     else:
-         usage.status()
-         sys.exit(1)
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 77b496e..0605cd7 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1118,6 +1118,14 @@ Commands:
-     corosync
-         View current membership information as seen by corosync.
- 
-+    quorum
-+        View current quorum status.
-+
-+    qdevice <device model> [--full] [<cluster name>]
-+        Show runtime status of specified model of quorum device provider.  Using
-+        --full will give more detailed output.  If <cluster name> is specified,
-+        only information about the specified cluster will be displayed.
-+
-     nodes [corosync|both|config]
-         View current status of nodes from pacemaker. If 'corosync' is
-         specified, print nodes currently configured in corosync, if 'both'
-@@ -1322,7 +1330,7 @@ Usage: pcs quorum <command>
- Manage cluster quorum settings.
- 
- Commands:
--    config
-+    [config]
-         Show quorum configuration.
- 
-     status
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch b/SOURCES/bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch
deleted file mode 100644
index 1642614..0000000
--- a/SOURCES/bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch
+++ /dev/null
@@ -1,4078 +0,0 @@
-From bc599f0f30c039a72540002d9a41a93c15626837 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 14 Sep 2016 09:04:57 +0200
-Subject: [PATCH] squash bz1158805 Add support for qdevice/qnetd pro
-
-9c7f37ef37bb lib: do not merge external processes' stdout and stderr
-
-db3ada5e27aa warn on stopping/destroying currently used qdevice
-
-18df73397b54 handle SBD when removing qdevice from a cluster
-
-766f86954b46 Allow to re-run "cluster node add" if failed due to qdevice
----
- pcs/cluster.py                               |  44 ++--
- pcs/common/report_codes.py                   |   6 +-
- pcs/common/tools.py                          |   3 +
- pcs/lib/booth/status.py                      |  27 +-
- pcs/lib/booth/test/test_status.py            |  26 +-
- pcs/lib/cib/tools.py                         |   7 +-
- pcs/lib/commands/booth.py                    |   5 +-
- pcs/lib/commands/qdevice.py                  |  35 ++-
- pcs/lib/commands/quorum.py                   |  12 +-
- pcs/lib/commands/test/test_booth.py          |   4 +-
- pcs/lib/corosync/live.py                     |  26 +-
- pcs/lib/corosync/qdevice_client.py           |   9 +-
- pcs/lib/corosync/qdevice_net.py              |  77 ++++--
- pcs/lib/external.py                          | 105 +++++---
- pcs/lib/pacemaker.py                         |  71 ++++--
- pcs/lib/reports.py                           |  97 ++++----
- pcs/lib/resource_agent.py                    |  31 ++-
- pcs/lib/sbd.py                               |   4 +-
- pcs/qdevice.py                               |   4 +-
- pcs/test/resources/corosync-qdevice.conf     |  34 +++
- pcs/test/test_common_tools.py                |  32 +++
- pcs/test/test_lib_cib_tools.py               |  10 +-
- pcs/test/test_lib_commands_qdevice.py        | 155 +++++++++++-
- pcs/test/test_lib_commands_quorum.py         | 105 +++++++-
- pcs/test/test_lib_corosync_live.py           |  30 ++-
- pcs/test/test_lib_corosync_qdevice_client.py |   8 +-
- pcs/test/test_lib_corosync_qdevice_net.py    | 110 +++++---
- pcs/test/test_lib_external.py                | 167 +++++++------
- pcs/test/test_lib_pacemaker.py               | 359 ++++++++++++++++++---------
- pcs/test/test_lib_resource_agent.py          |  39 ++-
- pcs/test/test_lib_sbd.py                     |  12 +-
- 31 files changed, 1166 insertions(+), 488 deletions(-)
- create mode 100644 pcs/test/resources/corosync-qdevice.conf
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 577e08e..e5ad1ec 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1414,7 +1414,6 @@ def cluster_node(argv):
-                 "cluster is not configured for RRP, "
-                 "you must not specify ring 1 address for the node"
-             )
--        corosync_conf = None
-         (canAdd, error) =  utils.canAddNodeToCluster(node0)
-         if not canAdd:
-             utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
-@@ -1422,7 +1421,29 @@ def cluster_node(argv):
-         report_processor = lib_env.report_processor
-         node_communicator = lib_env.node_communicator()
-         node_addr = NodeAddresses(node0, node1)
-+
-+        # First set up everything else than corosync. Once the new node is
-+        # present in corosync.conf / cluster.conf, it's considered part of a
-+        # cluster and the node add command cannot be run again. So we need to
-+        # minimize the amout of actions (and therefore possible failures) after
-+        # adding the node to corosync.
-         try:
-+            # qdevice setup
-+            if not utils.is_rhel6():
-+                conf_facade = corosync_conf_facade.from_string(
-+                    utils.getCorosyncConf()
-+                )
-+                qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
-+                if qdevice_model == "net":
-+                    _add_device_model_net(
-+                        lib_env,
-+                        qdevice_model_options["host"],
-+                        conf_facade.get_cluster_name(),
-+                        [node_addr],
-+                        skip_offline_nodes=False
-+                    )
-+
-+            # sbd setup
-             if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
-                 if "--watchdog" not in utils.pcs_options:
-                     watchdog = settings.sbd_watchdog_default
-@@ -1463,6 +1484,7 @@ def cluster_node(argv):
-                     report_processor, node_communicator, node_addr
-                 )
- 
-+            # booth setup
-             booth_sync.send_all_config_to_node(
-                 node_communicator,
-                 report_processor,
-@@ -1477,6 +1499,8 @@ def cluster_node(argv):
-                 [node_communicator_exception_to_report_item(e)]
-             )
- 
-+        # Now add the new node to corosync.conf / cluster.conf
-+        corosync_conf = None
-         for my_node in utils.getNodesFromCorosyncConf():
-             retval, output = utils.addLocalNode(my_node, node0, node1)
-             if retval != 0:
-@@ -1512,24 +1536,6 @@ def cluster_node(argv):
-                 except:
-                     utils.err('Unable to communicate with pcsd')
- 
--            # set qdevice-net certificates if needed
--            if not utils.is_rhel6():
--                try:
--                    conf_facade = corosync_conf_facade.from_string(
--                        corosync_conf
--                    )
--                    qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
--                    if qdevice_model == "net":
--                        _add_device_model_net(
--                            lib_env,
--                            qdevice_model_options["host"],
--                            conf_facade.get_cluster_name(),
--                            [node_addr],
--                            skip_offline_nodes=False
--                        )
--                except LibraryError as e:
--                    process_library_reports(e.args)
--
-             print("Setting up corosync...")
-             utils.setCorosyncConfig(node0, corosync_conf)
-             if "--enable" in utils.pcs_options:
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index e6a86ec..23e931f 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -8,17 +8,18 @@ from __future__ import (
- # force cathegories
- FORCE_ACTIVE_RRP = "ACTIVE_RRP"
- FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE"
--FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB"
- FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY"
-+FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB"
- FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE"
- FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
- FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE"
- FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
-+FORCE_METADATA_ISSUE = "METADATA_ISSUE"
- FORCE_OPTIONS = "OPTIONS"
- FORCE_QDEVICE_MODEL = "QDEVICE_MODEL"
-+FORCE_QDEVICE_USED = "QDEVICE_USED"
- FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT"
- FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT"
--FORCE_METADATA_ISSUE = "METADATA_ISSUE"
- SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
- SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG"
- 
-@@ -135,6 +136,7 @@ QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED"
- QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED"
- QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED"
- QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED"
-+QDEVICE_USED_BY_CLUSTERS = "QDEVICE_USED_BY_CLUSTERS"
- REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING"
- RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
- RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING'
-diff --git a/pcs/common/tools.py b/pcs/common/tools.py
-index 275f6b9..01194a5 100644
---- a/pcs/common/tools.py
-+++ b/pcs/common/tools.py
-@@ -38,3 +38,6 @@ def format_environment_error(e):
-     if e.filename:
-         return "{0}: '{1}'".format(e.strerror, e.filename)
-     return e.strerror
-+
-+def join_multilines(strings):
-+    return "\n".join([a.strip() for a in strings if a.strip()])
-diff --git a/pcs/lib/booth/status.py b/pcs/lib/booth/status.py
-index 4b93161..87cdc05 100644
---- a/pcs/lib/booth/status.py
-+++ b/pcs/lib/booth/status.py
-@@ -6,6 +6,7 @@ from __future__ import (
- )
- 
- from pcs import settings
-+from pcs.common.tools import join_multilines
- from pcs.lib.booth import reports
- from pcs.lib.errors import LibraryError
- 
-@@ -14,28 +15,36 @@ def get_daemon_status(runner, name=None):
-     cmd = [settings.booth_binary, "status"]
-     if name:
-         cmd += ["-c", name]
--    output, return_value = runner.run(cmd)
-+    stdout, stderr, return_value = runner.run(cmd)
-     # 7 means that there is no booth instance running
-     if return_value not in [0, 7]:
--        raise LibraryError(reports.booth_daemon_status_error(output))
--    return output
-+        raise LibraryError(
-+            reports.booth_daemon_status_error(join_multilines([stderr, stdout]))
-+        )
-+    return stdout
- 
- 
- def get_tickets_status(runner, name=None):
-     cmd = [settings.booth_binary, "list"]
-     if name:
-         cmd += ["-c", name]
--    output, return_value = runner.run(cmd)
-+    stdout, stderr, return_value = runner.run(cmd)
-     if return_value != 0:
--        raise LibraryError(reports.booth_tickets_status_error(output))
--    return output
-+        raise LibraryError(
-+            reports.booth_tickets_status_error(
-+                join_multilines([stderr, stdout])
-+            )
-+        )
-+    return stdout
- 
- 
- def get_peers_status(runner, name=None):
-     cmd = [settings.booth_binary, "peers"]
-     if name:
-         cmd += ["-c", name]
--    output, return_value = runner.run(cmd)
-+    stdout, stderr, return_value = runner.run(cmd)
-     if return_value != 0:
--        raise LibraryError(reports.booth_peers_status_error(output))
--    return output
-+        raise LibraryError(
-+            reports.booth_peers_status_error(join_multilines([stderr, stdout]))
-+        )
-+    return stdout
-diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py
-index d47ffca..dfb7354 100644
---- a/pcs/lib/booth/test/test_status.py
-+++ b/pcs/lib/booth/test/test_status.py
-@@ -30,34 +30,34 @@ class GetDaemonStatusTest(TestCase):
-         self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- 
-     def test_no_name(self):
--        self.mock_run.run.return_value = ("output", 0)
-+        self.mock_run.run.return_value = ("output", "", 0)
-         self.assertEqual("output", lib.get_daemon_status(self.mock_run))
-         self.mock_run.run.assert_called_once_with(
-             [settings.booth_binary, "status"]
-         )
- 
-     def test_with_name(self):
--        self.mock_run.run.return_value = ("output", 0)
-+        self.mock_run.run.return_value = ("output", "", 0)
-         self.assertEqual("output", lib.get_daemon_status(self.mock_run, "name"))
-         self.mock_run.run.assert_called_once_with(
-             [settings.booth_binary, "status", "-c", "name"]
-         )
- 
-     def test_daemon_not_running(self):
--        self.mock_run.run.return_value = ("", 7)
-+        self.mock_run.run.return_value = ("", "error", 7)
-         self.assertEqual("", lib.get_daemon_status(self.mock_run))
-         self.mock_run.run.assert_called_once_with(
-             [settings.booth_binary, "status"]
-         )
- 
-     def test_failure(self):
--        self.mock_run.run.return_value = ("out", 1)
-+        self.mock_run.run.return_value = ("out", "error", 1)
-         assert_raise_library_error(
-             lambda: lib.get_daemon_status(self.mock_run),
-             (
-                 Severities.ERROR,
-                 report_codes.BOOTH_DAEMON_STATUS_ERROR,
--                {"reason": "out"}
-+                {"reason": "error\nout"}
-             )
-         )
-         self.mock_run.run.assert_called_once_with(
-@@ -70,14 +70,14 @@ class GetTicketsStatusTest(TestCase):
-         self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- 
-     def test_no_name(self):
--        self.mock_run.run.return_value = ("output", 0)
-+        self.mock_run.run.return_value = ("output", "", 0)
-         self.assertEqual("output", lib.get_tickets_status(self.mock_run))
-         self.mock_run.run.assert_called_once_with(
-             [settings.booth_binary, "list"]
-         )
- 
-     def test_with_name(self):
--        self.mock_run.run.return_value = ("output", 0)
-+        self.mock_run.run.return_value = ("output", "", 0)
-         self.assertEqual(
-             "output", lib.get_tickets_status(self.mock_run, "name")
-         )
-@@ -86,14 +86,14 @@ class GetTicketsStatusTest(TestCase):
-         )
- 
-     def test_failure(self):
--        self.mock_run.run.return_value = ("out", 1)
-+        self.mock_run.run.return_value = ("out", "error", 1)
-         assert_raise_library_error(
-             lambda: lib.get_tickets_status(self.mock_run),
-             (
-                 Severities.ERROR,
-                 report_codes.BOOTH_TICKET_STATUS_ERROR,
-                 {
--                    "reason": "out"
-+                    "reason": "error\nout"
-                 }
-             )
-         )
-@@ -107,28 +107,28 @@ class GetPeersStatusTest(TestCase):
-         self.mock_run = mock.MagicMock(spec_set=CommandRunner)
- 
-     def test_no_name(self):
--        self.mock_run.run.return_value = ("output", 0)
-+        self.mock_run.run.return_value = ("output", "", 0)
-         self.assertEqual("output", lib.get_peers_status(self.mock_run))
-         self.mock_run.run.assert_called_once_with(
-             [settings.booth_binary, "peers"]
-         )
- 
-     def test_with_name(self):
--        self.mock_run.run.return_value = ("output", 0)
-+        self.mock_run.run.return_value = ("output", "", 0)
-         self.assertEqual("output", lib.get_peers_status(self.mock_run, "name"))
-         self.mock_run.run.assert_called_once_with(
-             [settings.booth_binary, "peers", "-c", "name"]
-         )
- 
-     def test_failure(self):
--        self.mock_run.run.return_value = ("out", 1)
-+        self.mock_run.run.return_value = ("out", "error", 1)
-         assert_raise_library_error(
-             lambda: lib.get_peers_status(self.mock_run),
-             (
-                 Severities.ERROR,
-                 report_codes.BOOTH_PEERS_STATUS_ERROR,
-                 {
--                    "reason": "out"
-+                    "reason": "error\nout"
-                 }
-             )
-         )
-diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
-index 8141360..6285931 100644
---- a/pcs/lib/cib/tools.py
-+++ b/pcs/lib/cib/tools.py
-@@ -11,6 +11,7 @@ import tempfile
- from lxml import etree
- 
- from pcs import settings
-+from pcs.common.tools import join_multilines
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError
- from pcs.lib.pacemaker_values import validate_id
-@@ -181,7 +182,7 @@ def upgrade_cib(cib, runner):
-         temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
-         temp_file.write(etree.tostring(cib).decode())
-         temp_file.flush()
--        output, retval = runner.run(
-+        stdout, stderr, retval = runner.run(
-             [
-                 os.path.join(settings.pacemaker_binaries, "cibadmin"),
-                 "--upgrade",
-@@ -192,7 +193,9 @@ def upgrade_cib(cib, runner):
- 
-         if retval != 0:
-             temp_file.close()
--            raise LibraryError(reports.cib_upgrade_failed(output))
-+            raise LibraryError(
-+                reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
-+            )
- 
-         temp_file.seek(0)
-         return etree.fromstring(temp_file.read())
-diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py
-index 7a3d348..bea966c 100644
---- a/pcs/lib/commands/booth.py
-+++ b/pcs/lib/commands/booth.py
-@@ -10,6 +10,7 @@ import os.path
- from functools import partial
- 
- from pcs import settings
-+from pcs.common.tools import join_multilines
- from pcs.lib import external, reports
- from pcs.lib.booth import (
-     config_exchange,
-@@ -185,7 +186,7 @@ def ticket_operation(operation, env, name, ticket, site_ip):
-             )
-         site_ip = site_ip_list[0]
- 
--    command_output, return_code = env.cmd_runner().run([
-+    stdout, stderr, return_code = env.cmd_runner().run([
-         settings.booth_binary, operation,
-         "-s", site_ip,
-         ticket
-@@ -195,7 +196,7 @@ def ticket_operation(operation, env, name, ticket, site_ip):
-         raise LibraryError(
-             booth_reports.booth_ticket_operation_failed(
-                 operation,
--                command_output,
-+                join_multilines([stderr, stdout]),
-                 site_ip,
-                 ticket
-             )
-diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
-index 1d1d85f..ca0ae86 100644
---- a/pcs/lib/commands/qdevice.py
-+++ b/pcs/lib/commands/qdevice.py
-@@ -8,9 +8,10 @@ from __future__ import (
- import base64
- import binascii
- 
-+from pcs.common import report_codes
- from pcs.lib import external, reports
- from pcs.lib.corosync import qdevice_net
--from pcs.lib.errors import LibraryError
-+from pcs.lib.errors import LibraryError, ReportItemSeverity
- 
- 
- def qdevice_setup(lib_env, model, enable, start):
-@@ -31,13 +32,20 @@ def qdevice_setup(lib_env, model, enable, start):
-     if start:
-         _service_start(lib_env, qdevice_net.qdevice_start)
- 
--def qdevice_destroy(lib_env, model):
-+def qdevice_destroy(lib_env, model, proceed_if_used=False):
-     """
-     Stop and disable qdevice on local host and remove its configuration
-     string model qdevice model to destroy
-+    bool procced_if_used destroy qdevice even if it is used by clusters
-     """
-     _ensure_not_cman(lib_env)
-     _check_model(model)
-+    _check_qdevice_not_used(
-+        lib_env.report_processor,
-+        lib_env.cmd_runner(),
-+        model,
-+        proceed_if_used
-+    )
-     _service_stop(lib_env, qdevice_net.qdevice_stop)
-     _service_disable(lib_env, qdevice_net.qdevice_disable)
-     qdevice_net.qdevice_destroy()
-@@ -83,12 +91,20 @@ def qdevice_start(lib_env, model):
-     _check_model(model)
-     _service_start(lib_env, qdevice_net.qdevice_start)
- 
--def qdevice_stop(lib_env, model):
-+def qdevice_stop(lib_env, model, proceed_if_used=False):
-     """
-     stop qdevice now on local host
-+    string model qdevice model to destroy
-+    bool procced_if_used stop qdevice even if it is used by clusters
-     """
-     _ensure_not_cman(lib_env)
-     _check_model(model)
-+    _check_qdevice_not_used(
-+        lib_env.report_processor,
-+        lib_env.cmd_runner(),
-+        model,
-+        proceed_if_used
-+    )
-     _service_stop(lib_env, qdevice_net.qdevice_stop)
- 
- def qdevice_kill(lib_env, model):
-@@ -176,6 +192,19 @@ def _check_model(model):
-             reports.invalid_option_value("model", model, ["net"])
-         )
- 
-+def _check_qdevice_not_used(reporter, runner, model, force=False):
-+    _check_model(model)
-+    connected_clusters = []
-+    if model == "net":
-+        status = qdevice_net.qdevice_status_cluster_text(runner)
-+        connected_clusters = qdevice_net.qdevice_connected_clusters(status)
-+    if connected_clusters:
-+        reporter.process(reports.qdevice_used_by_clusters(
-+            connected_clusters,
-+            ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR,
-+            None if force else report_codes.FORCE_QDEVICE_USED
-+        ))
-+
- def _service_start(lib_env, func):
-     lib_env.report_processor.process(
-         reports.service_start_started("quorum device")
-diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
-index 7fb7bb4..8390fc6 100644
---- a/pcs/lib/commands/quorum.py
-+++ b/pcs/lib/commands/quorum.py
-@@ -283,14 +283,23 @@ def remove_device(lib_env, skip_offline_nodes=False):
-     cfg = lib_env.get_corosync_conf()
-     model, dummy_options, dummy_options = cfg.get_quorum_device_settings()
-     cfg.remove_quorum_device()
-+
-+    if lib_env.is_corosync_conf_live:
-+        # fix quorum options for SBD to work properly
-+        if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg):
-+            lib_env.report_processor.process(reports.sbd_requires_atb())
-+            cfg.set_quorum_options(
-+                lib_env.report_processor, {"auto_tie_breaker": "1"}
-+            )
-+
-     lib_env.push_corosync_conf(cfg, skip_offline_nodes)
- 
-     if lib_env.is_corosync_conf_live:
-+        communicator = lib_env.node_communicator()
-         # disable qdevice
-         lib_env.report_processor.process(
-             reports.service_disable_started("corosync-qdevice")
-         )
--        communicator = lib_env.node_communicator()
-         parallel_nodes_communication_helper(
-             qdevice_client.remote_client_disable,
-             [
-@@ -304,7 +313,6 @@ def remove_device(lib_env, skip_offline_nodes=False):
-         lib_env.report_processor.process(
-             reports.service_stop_started("corosync-qdevice")
-         )
--        communicator = lib_env.node_communicator()
-         parallel_nodes_communication_helper(
-             qdevice_client.remote_client_stop,
-             [
-diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
-index 08d2c79..6bcab2b 100644
---- a/pcs/lib/commands/test/test_booth.py
-+++ b/pcs/lib/commands/test/test_booth.py
-@@ -520,7 +520,7 @@ class TicketOperationTest(TestCase):
-         )
- 
-     def test_raises_when_command_fail(self):
--        mock_run = mock.Mock(return_value=("some message", 1))
-+        mock_run = mock.Mock(return_value=("some message", "error", 1))
-         mock_env = mock.MagicMock(
-             cmd_runner=mock.Mock(return_value=mock.MagicMock(run=mock_run))
-         )
-@@ -533,7 +533,7 @@ class TicketOperationTest(TestCase):
-                 report_codes.BOOTH_TICKET_OPERATION_FAILED,
-                 {
-                     "operation": "grant",
--                    "reason": "some message",
-+                    "reason": "error\nsome message",
-                     "site_ip": "1.2.3.4",
-                     "ticket_name": "ABC",
-                 }
-diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
-index 1e68c31..67aa0e4 100644
---- a/pcs/lib/corosync/live.py
-+++ b/pcs/lib/corosync/live.py
-@@ -8,6 +8,7 @@ from __future__ import (
- import os.path
- 
- from pcs import settings
-+from pcs.common.tools import join_multilines
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError
- from pcs.lib.external import NodeCommunicator
-@@ -41,42 +42,39 @@ def reload_config(runner):
-     """
-     Ask corosync to reload its configuration
-     """
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         os.path.join(settings.corosync_binaries, "corosync-cfgtool"),
-         "-R"
-     ])
--    if retval != 0 or "invalid option" in output:
--        raise LibraryError(
--            reports.corosync_config_reload_error(output.rstrip())
--        )
-+    message = join_multilines([stderr, stdout])
-+    if retval != 0 or "invalid option" in message:
-+        raise LibraryError(reports.corosync_config_reload_error(message))
- 
- def get_quorum_status_text(runner):
-     """
-     Get runtime quorum status from the local node
-     """
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
-         "-p"
-     ])
-     # retval is 0 on success if node is not in partition with quorum
-     # retval is 1 on error OR on success if node has quorum
--    if retval not in [0, 1]:
--        raise LibraryError(
--            reports.corosync_quorum_get_status_error(output)
--        )
--    return output
-+    if retval not in [0, 1] or stderr.strip():
-+        raise LibraryError(reports.corosync_quorum_get_status_error(stderr))
-+    return stdout
- 
- def set_expected_votes(runner, votes):
-     """
-     set expected votes in live cluster to specified value
-     """
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
-         # format votes to handle the case where they are int
-         "-e", "{0}".format(votes)
-     ])
-     if retval != 0:
-         raise LibraryError(
--            reports.corosync_quorum_set_expected_votes_error(output)
-+            reports.corosync_quorum_set_expected_votes_error(stderr)
-         )
--    return output
-+    return stdout
-diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py
-index 98fbb0e..c9d0095 100644
---- a/pcs/lib/corosync/qdevice_client.py
-+++ b/pcs/lib/corosync/qdevice_client.py
-@@ -8,6 +8,7 @@ from __future__ import (
- import os.path
- 
- from pcs import settings
-+from pcs.common.tools import join_multilines
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError
- 
-@@ -23,12 +24,14 @@ def get_status_text(runner, verbose=False):
-     ]
-     if verbose:
-         cmd.append("-v")
--    output, retval = runner.run(cmd)
-+    stdout, stderr, retval = runner.run(cmd)
-     if retval != 0:
-         raise LibraryError(
--            reports.corosync_quorum_get_status_error(output)
-+            reports.corosync_quorum_get_status_error(
-+                join_multilines([stderr, stdout])
-+            )
-         )
--    return output
-+    return stdout
- 
- def remote_client_enable(reporter, node_communicator, node):
-     """
-diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
-index 4054592..200e45a 100644
---- a/pcs/lib/corosync/qdevice_net.py
-+++ b/pcs/lib/corosync/qdevice_net.py
-@@ -15,6 +15,7 @@ import shutil
- import tempfile
- 
- from pcs import settings
-+from pcs.common.tools import join_multilines
- from pcs.lib import external, reports
- from pcs.lib.errors import LibraryError
- 
-@@ -41,12 +42,15 @@ def qdevice_setup(runner):
-     if external.is_dir_nonempty(settings.corosync_qdevice_net_server_certs_dir):
-         raise LibraryError(reports.qdevice_already_initialized(__model))
- 
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         __qnetd_certutil, "-i"
-     ])
-     if retval != 0:
-         raise LibraryError(
--            reports.qdevice_initialization_error(__model, output.rstrip())
-+            reports.qdevice_initialization_error(
-+                __model,
-+                join_multilines([stderr, stdout])
-+            )
-         )
- 
- def qdevice_initialized():
-@@ -78,10 +82,15 @@ def qdevice_status_generic_text(runner, verbose=False):
-     cmd = [__qnetd_tool, "-s"]
-     if verbose:
-         cmd.append("-v")
--    output, retval = runner.run(cmd)
-+    stdout, stderr, retval = runner.run(cmd)
-     if retval != 0:
--        raise LibraryError(reports.qdevice_get_status_error(__model, output))
--    return output
-+        raise LibraryError(
-+            reports.qdevice_get_status_error(
-+                __model,
-+                join_multilines([stderr, stdout])
-+            )
-+        )
-+    return stdout
- 
- def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
-     """
-@@ -94,10 +103,24 @@ def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
-         cmd.append("-v")
-     if cluster:
-         cmd.extend(["-c", cluster])
--    output, retval = runner.run(cmd)
-+    stdout, stderr, retval = runner.run(cmd)
-     if retval != 0:
--        raise LibraryError(reports.qdevice_get_status_error(__model, output))
--    return output
-+        raise LibraryError(
-+            reports.qdevice_get_status_error(
-+                __model,
-+                join_multilines([stderr, stdout])
-+            )
-+        )
-+    return stdout
-+
-+def qdevice_connected_clusters(status_cluster_text):
-+    connected_clusters = []
-+    regexp = re.compile(r'^Cluster "(?P<cluster>[^"]+)":$')
-+    for line in status_cluster_text.splitlines():
-+        match = regexp.search(line)
-+        if match:
-+            connected_clusters.append(match.group("cluster"))
-+    return connected_clusters
- 
- def qdevice_enable(runner):
-     """
-@@ -143,17 +166,19 @@ def qdevice_sign_certificate_request(runner, cert_request, cluster_name):
-         reports.qdevice_certificate_sign_error
-     )
-     # sign the request
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name
-     ])
-     tmpfile.close() # temp file is deleted on close
-     if retval != 0:
-         raise LibraryError(
--            reports.qdevice_certificate_sign_error(output.strip())
-+            reports.qdevice_certificate_sign_error(
-+                join_multilines([stderr, stdout])
-+            )
-         )
-     # get signed certificate, corosync tool only works with files
-     return _get_output_certificate(
--        output,
-+        stdout,
-         reports.qdevice_certificate_sign_error
-     )
- 
-@@ -181,12 +206,15 @@ def client_setup(runner, ca_certificate):
-             reports.qdevice_initialization_error(__model, e.strerror)
-         )
-     # initialize client's certificate storage
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         __qdevice_certutil, "-i", "-c", ca_file_path
-     ])
-     if retval != 0:
-         raise LibraryError(
--            reports.qdevice_initialization_error(__model, output.rstrip())
-+            reports.qdevice_initialization_error(
-+                __model,
-+                join_multilines([stderr, stdout])
-+            )
-         )
- 
- def client_initialized():
-@@ -217,15 +245,18 @@ def client_generate_certificate_request(runner, cluster_name):
-     """
-     if not client_initialized():
-         raise LibraryError(reports.qdevice_not_initialized(__model))
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         __qdevice_certutil, "-r", "-n", cluster_name
-     ])
-     if retval != 0:
-         raise LibraryError(
--            reports.qdevice_initialization_error(__model, output.rstrip())
-+            reports.qdevice_initialization_error(
-+                __model,
-+                join_multilines([stderr, stdout])
-+            )
-         )
-     return _get_output_certificate(
--        output,
-+        stdout,
-         functools.partial(reports.qdevice_initialization_error, __model)
-     )
- 
-@@ -243,17 +274,19 @@ def client_cert_request_to_pk12(runner, cert_request):
-         reports.qdevice_certificate_import_error
-     )
-     # transform it
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         __qdevice_certutil, "-M", "-c", tmpfile.name
-     ])
-     tmpfile.close() # temp file is deleted on close
-     if retval != 0:
-         raise LibraryError(
--            reports.qdevice_certificate_import_error(output)
-+            reports.qdevice_certificate_import_error(
-+                join_multilines([stderr, stdout])
-+            )
-         )
-     # get resulting pk12, corosync tool only works with files
-     return _get_output_certificate(
--        output,
-+        stdout,
-         reports.qdevice_certificate_import_error
-     )
- 
-@@ -268,13 +301,15 @@ def client_import_certificate_and_key(runner, pk12_certificate):
-         pk12_certificate,
-         reports.qdevice_certificate_import_error
-     )
--    output, retval = runner.run([
-+    stdout, stderr, retval = runner.run([
-         __qdevice_certutil, "-m", "-c", tmpfile.name
-     ])
-     tmpfile.close() # temp file is deleted on close
-     if retval != 0:
-         raise LibraryError(
--            reports.qdevice_certificate_import_error(output)
-+            reports.qdevice_certificate_import_error(
-+                join_multilines([stderr, stdout])
-+            )
-         )
- 
- def remote_qdevice_get_ca_certificate(node_communicator, host):
-diff --git a/pcs/lib/external.py b/pcs/lib/external.py
-index 08bf2bb..074d2aa 100644
---- a/pcs/lib/external.py
-+++ b/pcs/lib/external.py
-@@ -47,14 +47,15 @@ except ImportError:
-         URLError as urllib_URLError
-     )
- 
--from pcs.lib import reports
--from pcs.lib.errors import LibraryError, ReportItemSeverity
-+from pcs import settings
- from pcs.common import report_codes
- from pcs.common.tools import (
-+    join_multilines,
-     simple_cache,
-     run_parallel as tools_run_parallel,
- )
--from pcs import settings
-+from pcs.lib import reports
-+from pcs.lib.errors import LibraryError, ReportItemSeverity
- 
- 
- class ManageServiceError(Exception):
-@@ -138,13 +139,17 @@ def disable_service(runner, service, instance=None):
-     if not is_service_installed(runner, service):
-         return
-     if is_systemctl():
--        output, retval = runner.run([
-+        stdout, stderr, retval = runner.run([
-             "systemctl", "disable", _get_service_name(service, instance)
-         ])
-     else:
--        output, retval = runner.run(["chkconfig", service, "off"])
-+        stdout, stderr, retval = runner.run(["chkconfig", service, "off"])
-     if retval != 0:
--        raise DisableServiceError(service, output.rstrip(), instance)
-+        raise DisableServiceError(
-+            service,
-+            join_multilines([stderr, stdout]),
-+            instance
-+        )
- 
- 
- def enable_service(runner, service, instance=None):
-@@ -158,13 +163,17 @@ def enable_service(runner, service, instance=None):
-         If None no instance name will be used.
-     """
-     if is_systemctl():
--        output, retval = runner.run([
-+        stdout, stderr, retval = runner.run([
-             "systemctl", "enable", _get_service_name(service, instance)
-         ])
-     else:
--        output, retval = runner.run(["chkconfig", service, "on"])
-+        stdout, stderr, retval = runner.run(["chkconfig", service, "on"])
-     if retval != 0:
--        raise EnableServiceError(service, output.rstrip(), instance)
-+        raise EnableServiceError(
-+            service,
-+            join_multilines([stderr, stdout]),
-+            instance
-+        )
- 
- 
- def start_service(runner, service, instance=None):
-@@ -176,13 +185,17 @@ def start_service(runner, service, instance=None):
-         If None no instance name will be used.
-     """
-     if is_systemctl():
--        output, retval = runner.run([
-+        stdout, stderr, retval = runner.run([
-             "systemctl", "start", _get_service_name(service, instance)
-         ])
-     else:
--        output, retval = runner.run(["service", service, "start"])
-+        stdout, stderr, retval = runner.run(["service", service, "start"])
-     if retval != 0:
--        raise StartServiceError(service, output.rstrip(), instance)
-+        raise StartServiceError(
-+            service,
-+            join_multilines([stderr, stdout]),
-+            instance
-+        )
- 
- 
- def stop_service(runner, service, instance=None):
-@@ -194,13 +207,17 @@ def stop_service(runner, service, instance=None):
-         If None no instance name will be used.
-     """
-     if is_systemctl():
--        output, retval = runner.run([
-+        stdout, stderr, retval = runner.run([
-             "systemctl", "stop", _get_service_name(service, instance)
-         ])
-     else:
--        output, retval = runner.run(["service", service, "stop"])
-+        stdout, stderr, retval = runner.run(["service", service, "stop"])
-     if retval != 0:
--        raise StopServiceError(service, output.rstrip(), instance)
-+        raise StopServiceError(
-+            service,
-+            join_multilines([stderr, stdout]),
-+            instance
-+        )
- 
- 
- def kill_services(runner, services):
-@@ -210,15 +227,16 @@ def kill_services(runner, services):
-     iterable services service names
-     """
-     # make killall not report that a process is not running
--    output, retval = runner.run(
-+    stdout, stderr, retval = runner.run(
-         ["killall", "--quiet", "--signal", "9", "--"] + list(services)
-     )
-     # If a process isn't running, killall will still return 1 even with --quiet.
-     # We don't consider that an error, so we check for output string as well.
-     # If it's empty, no actuall error happened.
-     if retval != 0:
--        if output.strip():
--            raise KillServicesError(list(services), output.rstrip())
-+        message = join_multilines([stderr, stdout])
-+        if message:
-+            raise KillServicesError(list(services), message)
- 
- 
- def is_service_enabled(runner, service, instance=None):
-@@ -229,11 +247,11 @@ def is_service_enabled(runner, service, instance=None):
-     service -- name of service
-     """
-     if is_systemctl():
--        _, retval = runner.run(
-+        dummy_stdout, dummy_stderr, retval = runner.run(
-             ["systemctl", "is-enabled", _get_service_name(service, instance)]
-         )
-     else:
--        _, retval = runner.run(["chkconfig", service])
-+        dummy_stdout, dummy_stderr, retval = runner.run(["chkconfig", service])
- 
-     return retval == 0
- 
-@@ -246,13 +264,15 @@ def is_service_running(runner, service, instance=None):
-     service -- name of service
-     """
-     if is_systemctl():
--        _, retval = runner.run([
-+        dummy_stdout, dummy_stderr, retval = runner.run([
-             "systemctl",
-             "is-active",
-             _get_service_name(service, instance)
-         ])
-     else:
--        _, retval = runner.run(["service", service, "status"])
-+        dummy_stdout, dummy_stderr, retval = runner.run(
-+            ["service", service, "status"]
-+        )
- 
-     return retval == 0
- 
-@@ -279,12 +299,12 @@ def get_non_systemd_services(runner):
-     if is_systemctl():
-         return []
- 
--    output, return_code = runner.run(["chkconfig"], ignore_stderr=True)
-+    stdout, dummy_stderr, return_code = runner.run(["chkconfig"])
-     if return_code != 0:
-         return []
- 
-     service_list = []
--    for service in output.splitlines():
-+    for service in stdout.splitlines():
-         service = service.split(" ", 1)[0]
-         if service:
-             service_list.append(service)
-@@ -300,12 +320,14 @@ def get_systemd_services(runner):
-     if not is_systemctl():
-         return []
- 
--    output, return_code = runner.run(["systemctl", "list-unit-files", "--full"])
-+    stdout, dummy_stderr, return_code = runner.run([
-+        "systemctl", "list-unit-files", "--full"
-+    ])
-     if return_code != 0:
-         return []
- 
-     service_list = []
--    for service in output.splitlines():
-+    for service in stdout.splitlines():
-         match = re.search(r'^([\S]*)\.service', service)
-         if match:
-             service_list.append(match.group(1))
-@@ -322,13 +344,13 @@ def is_cman_cluster(runner):
-     # - corosync1 runs with cman on rhel6
-     # - corosync1 can be used without cman, but we don't support it anyways
-     # - corosync2 is the default result if errors occur
--    output, retval = runner.run([
-+    stdout, dummy_stderr, retval = runner.run([
-         os.path.join(settings.corosync_binaries, "corosync"),
-         "-v"
-     ])
-     if retval != 0:
-         return False
--    match = re.search(r"version\D+(\d+)", output)
-+    match = re.search(r"version\D+(\d+)", stdout)
-     return match is not None and match.group(1) == "1"
- 
- 
-@@ -340,8 +362,7 @@ class CommandRunner(object):
-         self._python2 = sys.version[0] == "2"
- 
-     def run(
--        self, args, ignore_stderr=False, stdin_string=None, env_extend=None,
--        binary_output=False
-+        self, args, stdin_string=None, env_extend=None, binary_output=False
-     ):
-         #Reset environment variables by empty dict is desired here.  We need to
-         #get rid of defaults - we do not know the context and environment of the
-@@ -364,9 +385,7 @@ class CommandRunner(object):
-                 # Some commands react differently if they get anything via stdin
-                 stdin=(subprocess.PIPE if stdin_string is not None else None),
-                 stdout=subprocess.PIPE,
--                stderr=(
--                    subprocess.PIPE if ignore_stderr else subprocess.STDOUT
--                ),
-+                stderr=subprocess.PIPE,
-                 preexec_fn=(
-                     lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-                 ),
-@@ -376,7 +395,7 @@ class CommandRunner(object):
-                 # decodes newlines and in python3 also converts bytes to str
-                 universal_newlines=(not self._python2 and not binary_output)
-             )
--            output, dummy_stderror = process.communicate(stdin_string)
-+            out_std, out_err = process.communicate(stdin_string)
-             retval = process.returncode
-         except OSError as e:
-             raise LibraryError(
-@@ -386,13 +405,19 @@ class CommandRunner(object):
-         self._logger.debug(
-             (
-                 "Finished running: {args}\nReturn value: {retval}"
--                + "\n--Debug Output Start--\n{output}\n--Debug Output End--"
--            ).format(args=log_args, retval=retval, output=output)
--        )
--        self._reporter.process(
--            reports.run_external_process_finished(log_args, retval, output)
-+                + "\n--Debug Stdout Start--\n{out_std}\n--Debug Stdout End--"
-+                + "\n--Debug Stderr Start--\n{out_err}\n--Debug Stderr End--"
-+            ).format(
-+                args=log_args,
-+                retval=retval,
-+                out_std=out_std,
-+                out_err=out_err
-+            )
-         )
--        return output, retval
-+        self._reporter.process(reports.run_external_process_finished(
-+            log_args, retval, out_std, out_err
-+        ))
-+        return out_std, out_err, retval
- 
- 
- class NodeCommunicationException(Exception):
-diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker.py
-index fd6f97b..6747b22 100644
---- a/pcs/lib/pacemaker.py
-+++ b/pcs/lib/pacemaker.py
-@@ -9,6 +9,7 @@ import os.path
- from lxml import etree
- 
- from pcs import settings
-+from pcs.common.tools import join_multilines
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError
- from pcs.lib.pacemaker_state import ClusterState
-@@ -26,28 +27,33 @@ def __exec(name):
-     return os.path.join(settings.pacemaker_binaries, name)
- 
- def get_cluster_status_xml(runner):
--    output, retval = runner.run(
-+    stdout, stderr, retval = runner.run(
-         [__exec("crm_mon"), "--one-shot", "--as-xml", "--inactive"]
-     )
-     if retval != 0:
-         raise CrmMonErrorException(
--            reports.cluster_state_cannot_load(retval, output)
-+            reports.cluster_state_cannot_load(join_multilines([stderr, stdout]))
-         )
--    return output
-+    return stdout
- 
- def get_cib_xml(runner, scope=None):
-     command = [__exec("cibadmin"), "--local", "--query"]
-     if scope:
-         command.append("--scope={0}".format(scope))
--    output, retval = runner.run(command)
-+    stdout, stderr, retval = runner.run(command)
-     if retval != 0:
-         if retval == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT and scope:
-             raise LibraryError(
--                reports.cib_load_error_scope_missing(scope, retval, output)
-+                reports.cib_load_error_scope_missing(
-+                    scope,
-+                    join_multilines([stderr, stdout])
-+                )
-             )
-         else:
--            raise LibraryError(reports.cib_load_error(retval, output))
--    return output
-+            raise LibraryError(
-+                reports.cib_load_error(join_multilines([stderr, stdout]))
-+            )
-+    return stdout
- 
- def get_cib(xml):
-     try:
-@@ -59,9 +65,9 @@ def replace_cib_configuration_xml(runner, xml, cib_upgraded=False):
-     cmd = [__exec("cibadmin"), "--replace",  "--verbose", "--xml-pipe"]
-     if not cib_upgraded:
-         cmd += ["--scope", "configuration"]
--    output, retval = runner.run(cmd, stdin_string=xml)
-+    stdout, stderr, retval = runner.run(cmd, stdin_string=xml)
-     if retval != 0:
--        raise LibraryError(reports.cib_push_error(retval, output))
-+        raise LibraryError(reports.cib_push_error(stderr, stdout))
- 
- def replace_cib_configuration(runner, tree, cib_upgraded=False):
-     #etree returns bytes: b'xml'
-@@ -108,13 +114,18 @@ def resource_cleanup(runner, resource=None, node=None, force=False):
-     if node:
-         cmd.extend(["--node", node])
- 
--    output, retval = runner.run(cmd)
-+    stdout, stderr, retval = runner.run(cmd)
- 
-     if retval != 0:
-         raise LibraryError(
--            reports.resource_cleanup_error(retval, output, resource, node)
-+            reports.resource_cleanup_error(
-+                join_multilines([stderr, stdout]),
-+                resource,
-+                node
-+            )
-         )
--    return output
-+    # usefull output (what has been done) goes to stderr
-+    return join_multilines([stdout, stderr])
- 
- def nodes_standby(runner, node_list=None, all_nodes=False):
-     return __nodes_standby_unstandby(runner, True, node_list, all_nodes)
-@@ -124,8 +135,11 @@ def nodes_unstandby(runner, node_list=None, all_nodes=False):
- 
- def has_resource_wait_support(runner):
-     # returns 1 on success so we don't care about retval
--    output, dummy_retval = runner.run([__exec("crm_resource"), "-?"])
--    return "--wait" in output
-+    stdout, stderr, dummy_retval = runner.run(
-+        [__exec("crm_resource"), "-?"]
-+    )
-+    # help goes to stderr but we check stdout as well if that gets changed
-+    return "--wait" in stderr or "--wait" in stdout
- 
- def ensure_resource_wait_support(runner):
-     if not has_resource_wait_support(runner):
-@@ -135,15 +149,22 @@ def wait_for_resources(runner, timeout=None):
-     args = [__exec("crm_resource"), "--wait"]
-     if timeout is not None:
-         args.append("--timeout={0}".format(timeout))
--    output, retval = runner.run(args)
-+    stdout, stderr, retval = runner.run(args)
-     if retval != 0:
-+        # Usefull info goes to stderr - not only error messages, a list of
-+        # pending actions in case of timeout goes there as well.
-+        # We use stdout just to be sure if that's get changed.
-         if retval == __EXITCODE_WAIT_TIMEOUT:
-             raise LibraryError(
--                reports.resource_wait_timed_out(retval, output.strip())
-+                reports.resource_wait_timed_out(
-+                    join_multilines([stderr, stdout])
-+                )
-             )
-         else:
-             raise LibraryError(
--                reports.resource_wait_error(retval, output.strip())
-+                reports.resource_wait_error(
-+                    join_multilines([stderr, stdout])
-+                )
-             )
- 
- def __nodes_standby_unstandby(
-@@ -178,9 +199,11 @@ def __nodes_standby_unstandby(
-         cmd_list.append(cmd_template)
-     report = []
-     for cmd in cmd_list:
--        output, retval = runner.run(cmd)
-+        stdout, stderr, retval = runner.run(cmd)
-         if retval != 0:
--            report.append(reports.common_error(output))
-+            report.append(
-+                reports.common_error(join_multilines([stderr, stdout]))
-+            )
-     if report:
-         raise LibraryError(*report)
- 
-@@ -189,21 +212,23 @@ def __get_local_node_name(runner):
-     # but it returns false names when cluster is not running (or we are on
-     # a remote node). Getting node id first is reliable since it fails in those
-     # cases.
--    output, retval = runner.run([__exec("crm_node"), "--cluster-id"])
-+    stdout, dummy_stderr, retval = runner.run(
-+        [__exec("crm_node"), "--cluster-id"]
-+    )
-     if retval != 0:
-         raise LibraryError(
-             reports.pacemaker_local_node_name_not_found("node id not found")
-         )
--    node_id = output.strip()
-+    node_id = stdout.strip()
- 
--    output, retval = runner.run(
-+    stdout, dummy_stderr, retval = runner.run(
-         [__exec("crm_node"), "--name-for-id={0}".format(node_id)]
-     )
-     if retval != 0:
-         raise LibraryError(
-             reports.pacemaker_local_node_name_not_found("node name not found")
-         )
--    node_name = output.strip()
-+    node_name = stdout.strip()
- 
-     if node_name == "(null)":
-         raise LibraryError(
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index a701679..b9e9a66 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -262,21 +262,24 @@ def run_external_process_started(command, stdin):
-         }
-     )
- 
--def run_external_process_finished(command, retval, stdout):
-+def run_external_process_finished(command, retval, stdout, stderr):
-     """
-     information about result of running an external process
-     command string the external process command
-     retval external process's return (exit) code
-     stdout string external process's stdout
-+    stderr string external process's stderr
-     """
-     return ReportItem.debug(
-         report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
-         "Finished running: {command}\nReturn value: {return_value}"
--        + "\n--Debug Output Start--\n{stdout}\n--Debug Output End--\n",
-+        + "\n--Debug Stdout Start--\n{stdout}\n--Debug Stdout End--"
-+        + "\n--Debug Stderr Start--\n{stderr}\n--Debug Stderr End--\n",
-         info={
-             "command": command,
-             "return_value": retval,
-             "stdout": stdout,
-+            "stderr": stderr,
-         }
-     )
- 
-@@ -854,6 +857,23 @@ def qdevice_get_status_error(model, reason):
-         }
-     )
- 
-+def qdevice_used_by_clusters(
-+    clusters, severity=ReportItemSeverity.ERROR, forceable=None
-+):
-+    """
-+    Qdevice is currently being used by clusters, cannot stop it unless forced
-+    """
-+    return ReportItem(
-+        report_codes.QDEVICE_USED_BY_CLUSTERS,
-+        severity,
-+        "Quorum device is currently being used by cluster(s): {clusters_str}",
-+        info={
-+            "clusters": clusters,
-+            "clusters_str": ", ".join(clusters),
-+        },
-+        forceable=forceable
-+    )
-+
- def cman_unsupported_command():
-     """
-     requested library command is not available as local cluster is CMAN based
-@@ -903,35 +923,31 @@ def resource_does_not_exist(resource_id):
-         }
-     )
- 
--def cib_load_error(retval, stdout):
-+def cib_load_error(reason):
-     """
-     cannot load cib from cibadmin, cibadmin exited with non-zero code
--    retval external process's return (exit) code
--    stdout string external process's stdout
-+    string reason error description
-     """
-     return ReportItem.error(
-         report_codes.CIB_LOAD_ERROR,
-         "unable to get cib",
-         info={
--            "return_value": retval,
--            "stdout": stdout,
-+            "reason": reason,
-         }
-     )
- 
--def cib_load_error_scope_missing(scope, retval, stdout):
-+def cib_load_error_scope_missing(scope, reason):
-     """
-     cannot load cib from cibadmin, specified scope is missing in the cib
-     scope string requested cib scope
--    retval external process's return (exit) code
--    stdout string external process's stdout
-+    string reason error description
-     """
-     return ReportItem.error(
-         report_codes.CIB_LOAD_ERROR_SCOPE_MISSING,
-         "unable to get cib, scope '{scope}' not present in cib",
-         info={
-             "scope": scope,
--            "return_value": retval,
--            "stdout": stdout,
-+            "reason": reason,
-         }
-     )
- 
-@@ -957,33 +973,31 @@ def cib_missing_mandatory_section(section_name):
-         }
-     )
- 
--def cib_push_error(retval, stdout):
-+def cib_push_error(reason, pushed_cib):
-     """
-     cannot push cib to cibadmin, cibadmin exited with non-zero code
--    retval external process's return (exit) code
--    stdout string external process's stdout
-+    string reason error description
-+    string pushed_cib cib which failed to be pushed
-     """
-     return ReportItem.error(
-         report_codes.CIB_PUSH_ERROR,
--        "Unable to update cib\n{stdout}",
-+        "Unable to update cib\n{reason}\n{pushed_cib}",
-         info={
--            "return_value": retval,
--            "stdout": stdout,
-+            "reason": reason,
-+            "pushed_cib": pushed_cib,
-         }
-     )
- 
--def cluster_state_cannot_load(retval, stdout):
-+def cluster_state_cannot_load(reason):
-     """
-     cannot load cluster status from crm_mon, crm_mon exited with non-zero code
--    retval external process's return (exit) code
--    stdout string external process's stdout
-+    string reason error description
-     """
-     return ReportItem.error(
-         report_codes.CRM_MON_ERROR,
-         "error running crm_mon, is pacemaker running?",
-         info={
--            "return_value": retval,
--            "stdout": stdout,
-+            "reason": reason,
-         }
-     )
- 
-@@ -1005,57 +1019,50 @@ def resource_wait_not_supported():
-         "crm_resource does not support --wait, please upgrade pacemaker"
-     )
- 
--def resource_wait_timed_out(retval, stdout):
-+def resource_wait_timed_out(reason):
-     """
-     waiting for resources (crm_resource --wait) failed, timeout expired
--    retval external process's return (exit) code
--    stdout string external process's stdout
-+    string reason error description
-     """
-     return ReportItem.error(
-         report_codes.RESOURCE_WAIT_TIMED_OUT,
--        "waiting timeout\n\n{stdout}",
-+        "waiting timeout\n\n{reason}",
-         info={
--            "return_value": retval,
--            "stdout": stdout,
-+            "reason": reason,
-         }
-     )
- 
--def resource_wait_error(retval, stdout):
-+def resource_wait_error(reason):
-     """
-     waiting for resources (crm_resource --wait) failed
--    retval external process's return (exit) code
--    stdout string external process's stdout
-+    string reason error description
-     """
-     return ReportItem.error(
-         report_codes.RESOURCE_WAIT_ERROR,
--        "{stdout}",
-+        "{reason}",
-         info={
--            "return_value": retval,
--            "stdout": stdout,
-+            "reason": reason,
-         }
-     )
- 
--def resource_cleanup_error(retval, stdout, resource=None, node=None):
-+def resource_cleanup_error(reason, resource=None, node=None):
-     """
-     an error occured when deleting resource history in pacemaker
--    retval external process's return (exit) code
--    stdout string external process's stdout
--    resource string resource which has been cleaned up
--    node string node which has been cleaned up
-+    string reason error description
-+    string resource resource which has been cleaned up
-+    string node node which has been cleaned up
-     """
-     if resource:
--        text = "Unable to cleanup resource: {resource}\n{stdout}"
-+        text = "Unable to cleanup resource: {resource}\n{reason}"
-     else:
-         text = (
--            "Unexpected error occured. 'crm_resource -C' err_code: "
--            + "{return_value}\n{stdout}"
-+            "Unexpected error occured. 'crm_resource -C' error:\n{reason}"
-         )
-     return ReportItem.error(
-         report_codes.RESOURCE_CLEANUP_ERROR,
-         text,
-         info={
--            "return_value": retval,
--            "stdout": stdout,
-+            "reason": reason,
-             "resource": resource,
-             "node": node,
-         }
-diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
-index ea93875..d49b5c0 100644
---- a/pcs/lib/resource_agent.py
-+++ b/pcs/lib/resource_agent.py
-@@ -125,14 +125,14 @@ def _get_pcmk_advanced_stonith_parameters(runner):
-     """
-     @simple_cache
-     def __get_stonithd_parameters():
--        output, retval = runner.run(
--            [settings.stonithd_binary, "metadata"], ignore_stderr=True
-+        stdout, stderr, dummy_retval = runner.run(
-+            [settings.stonithd_binary, "metadata"]
-         )
--        if output.strip() == "":
--            raise UnableToGetAgentMetadata("stonithd", output)
-+        if stdout.strip() == "":
-+            raise UnableToGetAgentMetadata("stonithd", stderr)
- 
-         try:
--            params = _get_agent_parameters(etree.fromstring(output))
-+            params = _get_agent_parameters(etree.fromstring(stdout))
-             for param in params:
-                 param["longdesc"] = "{0}\n{1}".format(
-                     param["shortdesc"], param["longdesc"]
-@@ -166,15 +166,15 @@ def get_fence_agent_metadata(runner, fence_agent):
-     ):
-         raise AgentNotFound(fence_agent)
- 
--    output, retval = runner.run(
--        [script_path, "-o", "metadata"], ignore_stderr=True
-+    stdout, stderr, dummy_retval = runner.run(
-+        [script_path, "-o", "metadata"]
-     )
- 
--    if output.strip() == "":
--        raise UnableToGetAgentMetadata(fence_agent, output)
-+    if stdout.strip() == "":
-+        raise UnableToGetAgentMetadata(fence_agent, stderr)
- 
-     try:
--        return etree.fromstring(output)
-+        return etree.fromstring(stdout)
-     except etree.XMLSyntaxError as e:
-         raise UnableToGetAgentMetadata(fence_agent, str(e))
- 
-@@ -219,17 +219,16 @@ def _get_ocf_resource_agent_metadata(runner, provider, agent):
-     if not __is_path_abs(script_path) or not is_path_runnable(script_path):
-         raise AgentNotFound(agent_name)
- 
--    output, retval = runner.run(
-+    stdout, stderr, dummy_retval = runner.run(
-         [script_path, "meta-data"],
--        env_extend={"OCF_ROOT": settings.ocf_root},
--        ignore_stderr=True
-+        env_extend={"OCF_ROOT": settings.ocf_root}
-     )
- 
--    if output.strip() == "":
--        raise UnableToGetAgentMetadata(agent_name, output)
-+    if stdout.strip() == "":
-+        raise UnableToGetAgentMetadata(agent_name, stderr)
- 
-     try:
--        return etree.fromstring(output)
-+        return etree.fromstring(stdout)
-     except etree.XMLSyntaxError as e:
-         raise UnableToGetAgentMetadata(agent_name, str(e))
- 
-diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
-index 39de740..9b57400 100644
---- a/pcs/lib/sbd.py
-+++ b/pcs/lib/sbd.py
-@@ -115,11 +115,11 @@ def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0):
-         node.
-     """
-     return (
-+        not corosync_conf_facade.is_enabled_auto_tie_breaker()
-+        and
-         is_auto_tie_breaker_needed(
-             runner, corosync_conf_facade, node_number_modifier
-         )
--        and
--        not corosync_conf_facade.is_enabled_auto_tie_breaker()
-     )
- 
- 
-diff --git a/pcs/qdevice.py b/pcs/qdevice.py
-index 0037704..2591bae 100644
---- a/pcs/qdevice.py
-+++ b/pcs/qdevice.py
-@@ -92,7 +92,7 @@ def qdevice_destroy_cmd(lib, argv, modifiers):
-     if len(argv) != 1:
-         raise CmdLineInputError()
-     model = argv[0]
--    lib.qdevice.destroy(model)
-+    lib.qdevice.destroy(model, modifiers["force"])
- 
- def qdevice_start_cmd(lib, argv, modifiers):
-     if len(argv) != 1:
-@@ -104,7 +104,7 @@ def qdevice_stop_cmd(lib, argv, modifiers):
-     if len(argv) != 1:
-         raise CmdLineInputError()
-     model = argv[0]
--    lib.qdevice.stop(model)
-+    lib.qdevice.stop(model, modifiers["force"])
- 
- def qdevice_kill_cmd(lib, argv, modifiers):
-     if len(argv) != 1:
-diff --git a/pcs/test/resources/corosync-qdevice.conf b/pcs/test/resources/corosync-qdevice.conf
-new file mode 100644
-index 0000000..38998e7
---- /dev/null
-+++ b/pcs/test/resources/corosync-qdevice.conf
-@@ -0,0 +1,34 @@
-+totem {
-+    version: 2
-+    secauth: off
-+    cluster_name: test99
-+    transport: udpu
-+}
-+
-+nodelist {
-+    node {
-+        ring0_addr: rh7-1
-+        nodeid: 1
-+    }
-+
-+    node {
-+        ring0_addr: rh7-2
-+        nodeid: 2
-+    }
-+}
-+
-+quorum {
-+    provider: corosync_votequorum
-+
-+    device {
-+        model: net
-+
-+        net {
-+            host: 127.0.0.1
-+        }
-+    }
-+}
-+
-+logging {
-+    to_syslog: yes
-+}
-diff --git a/pcs/test/test_common_tools.py b/pcs/test/test_common_tools.py
-index 5290e6d..d9b6af3 100644
---- a/pcs/test/test_common_tools.py
-+++ b/pcs/test/test_common_tools.py
-@@ -63,3 +63,35 @@ class RunParallelTestCase(TestCase):
-         elapsed_time = finish_time - start_time
-         self.assertTrue(elapsed_time > x)
-         self.assertTrue(elapsed_time < sum([i + 1 for i in range(x)]))
-+
-+
-+class JoinMultilinesTest(TestCase):
-+    def test_empty_input(self):
-+        self.assertEqual(
-+            "",
-+            tools.join_multilines([])
-+        )
-+
-+    def test_two_strings(self):
-+        self.assertEqual(
-+            "a\nb",
-+            tools.join_multilines(["a", "b"])
-+        )
-+
-+    def test_strip(self):
-+        self.assertEqual(
-+            "a\nb",
-+            tools.join_multilines(["  a\n", "  b\n"])
-+        )
-+
-+    def test_skip_empty(self):
-+        self.assertEqual(
-+            "a\nb",
-+            tools.join_multilines(["  a\n", "   \n", "  b\n"])
-+        )
-+
-+    def test_multiline(self):
-+        self.assertEqual(
-+            "a\nA\nb\nB",
-+            tools.join_multilines(["a\nA\n", "b\nB\n"])
-+        )
-diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
-index ffc2642..ec9c312 100644
---- a/pcs/test/test_lib_cib_tools.py
-+++ b/pcs/test/test_lib_cib_tools.py
-@@ -383,7 +383,7 @@ class UpgradeCibTest(TestCase):
-         mock_file.name = "mock_file_name"
-         mock_file.read.return_value = "<cib/>"
-         mock_named_file.return_value = mock_file
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         assert_xml_equal(
-             "<cib/>",
-             etree.tostring(
-@@ -408,13 +408,15 @@ class UpgradeCibTest(TestCase):
-         mock_file = mock.MagicMock()
-         mock_file.name = "mock_file_name"
-         mock_named_file.return_value = mock_file
--        self.mock_runner.run.return_value = ("reason", 1)
-+        self.mock_runner.run.return_value = ("some info", "some error", 1)
-         assert_raise_library_error(
-             lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
-             (
-                 severities.ERROR,
-                 report_codes.CIB_UPGRADE_FAILED,
--                {"reason": "reason"}
-+                {
-+                    "reason": "some error\nsome info",
-+                }
-             )
-         )
-         mock_named_file.assert_called_once_with("w+", suffix=".pcs")
-@@ -434,7 +436,7 @@ class UpgradeCibTest(TestCase):
-         mock_file.name = "mock_file_name"
-         mock_file.read.return_value = "not xml"
-         mock_named_file.return_value = mock_file
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         assert_raise_library_error(
-             lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
-             (
-diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py
-index 10841e9..756afa8 100644
---- a/pcs/test/test_lib_commands_qdevice.py
-+++ b/pcs/test/test_lib_commands_qdevice.py
-@@ -345,6 +345,7 @@ class QdeviceNetSetupTest(QdeviceTestCase):
-         )
- 
- 
-+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text")
- @mock.patch("pcs.lib.external.stop_service")
- @mock.patch("pcs.lib.external.disable_service")
- @mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_destroy")
-@@ -355,7 +356,11 @@ class QdeviceNetSetupTest(QdeviceTestCase):
-     lambda self: "mock_runner"
- )
- class QdeviceNetDestroyTest(QdeviceTestCase):
--    def test_success(self, mock_net_destroy, mock_net_disable, mock_net_stop):
-+    def test_success_not_used(
-+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
-+    ):
-+        mock_status.return_value = ""
-+
-         lib.qdevice_destroy(self.lib_env, "net")
- 
-         mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
-@@ -398,9 +403,85 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
-             ]
-         )
- 
-+    def test_success_used_forced(
-+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
-+    ):
-+        mock_status.return_value = 'Cluster "a_cluster":\n'
-+
-+        lib.qdevice_destroy(self.lib_env, "net", proceed_if_used=True)
-+
-+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
-+        mock_net_disable.assert_called_once_with(
-+            "mock_runner",
-+            "corosync-qnetd"
-+        )
-+        mock_net_destroy.assert_called_once_with()
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.WARNING,
-+                    report_codes.QDEVICE_USED_BY_CLUSTERS,
-+                    {
-+                        "clusters": ["a_cluster"],
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_STOP_STARTED,
-+                    {
-+                        "service": "quorum device",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_STOP_SUCCESS,
-+                    {
-+                        "service": "quorum device",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_DISABLE_SUCCESS,
-+                    {
-+                        "service": "quorum device",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.QDEVICE_DESTROY_SUCCESS,
-+                    {
-+                        "model": "net",
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_used_not_forced(
-+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
-+    ):
-+        mock_status.return_value = 'Cluster "a_cluster":\n'
-+
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_destroy(self.lib_env, "net"),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_USED_BY_CLUSTERS,
-+                {
-+                    "clusters": ["a_cluster"],
-+                },
-+                report_codes.FORCE_QDEVICE_USED
-+            ),
-+        )
-+
-+        mock_net_stop.assert_not_called()
-+        mock_net_disable.assert_not_called()
-+        mock_net_destroy.assert_not_called()
-+
-     def test_stop_failed(
--        self, mock_net_destroy, mock_net_disable, mock_net_stop
-+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
-     ):
-+        mock_status.return_value = ""
-         mock_net_stop.side_effect = StopServiceError(
-             "test service",
-             "test error"
-@@ -435,8 +516,9 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
-         )
- 
-     def test_disable_failed(
--        self, mock_net_destroy, mock_net_disable, mock_net_stop
-+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
-     ):
-+        mock_status.return_value = ""
-         mock_net_disable.side_effect = DisableServiceError(
-             "test service",
-             "test error"
-@@ -481,8 +563,9 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
-         )
- 
-     def test_destroy_failed(
--        self, mock_net_destroy, mock_net_disable, mock_net_stop
-+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
-     ):
-+        mock_status.return_value = ""
-         mock_net_destroy.side_effect = LibraryError("mock_report_item")
- 
-         self.assertRaises(
-@@ -755,6 +838,7 @@ class QdeviceNetStartTest(QdeviceTestCase):
-         )
- 
- 
-+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text")
- @mock.patch("pcs.lib.external.stop_service")
- @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
- @mock.patch.object(
-@@ -763,13 +847,49 @@ class QdeviceNetStartTest(QdeviceTestCase):
-     lambda self: "mock_runner"
- )
- class QdeviceNetStopTest(QdeviceTestCase):
--    def test_success(self, mock_net_stop):
--        lib.qdevice_stop(self.lib_env, "net")
-+    def test_success_not_used(self, mock_net_stop, mock_status):
-+        mock_status.return_value = ""
-+
-+        lib.qdevice_stop(self.lib_env, "net", proceed_if_used=False)
-+
-+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_STOP_STARTED,
-+                    {
-+                        "service": "quorum device",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_STOP_SUCCESS,
-+                    {
-+                        "service": "quorum device",
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_success_used_forced(self, mock_net_stop, mock_status):
-+        mock_status.return_value = 'Cluster "a_cluster":\n'
-+
-+        lib.qdevice_stop(self.lib_env, "net", proceed_if_used=True)
-+
-         mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
-         assert_report_item_list_equal(
-             self.mock_reporter.report_item_list,
-             [
-                 (
-+                    severity.WARNING,
-+                    report_codes.QDEVICE_USED_BY_CLUSTERS,
-+                    {
-+                        "clusters": ["a_cluster"],
-+                    }
-+                ),
-+                (
-                     severity.INFO,
-                     report_codes.SERVICE_STOP_STARTED,
-                     {
-@@ -786,7 +906,28 @@ class QdeviceNetStopTest(QdeviceTestCase):
-             ]
-         )
- 
--    def test_failed(self, mock_net_stop):
-+    def test_used_not_forced(self, mock_net_stop, mock_status):
-+        mock_status.return_value = 'Cluster "a_cluster":\n'
-+
-+        assert_raise_library_error(
-+            lambda: lib.qdevice_stop(
-+                self.lib_env,
-+                "net",
-+                proceed_if_used=False
-+            ),
-+            (
-+                severity.ERROR,
-+                report_codes.QDEVICE_USED_BY_CLUSTERS,
-+                {
-+                    "clusters": ["a_cluster"],
-+                },
-+                report_codes.FORCE_QDEVICE_USED
-+            ),
-+        )
-+        mock_net_stop.assert_not_called()
-+
-+    def test_failed(self, mock_net_stop, mock_status):
-+        mock_status.return_value = ""
-         mock_net_stop.side_effect = StopServiceError(
-             "test service",
-             "test error"
-diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
-index d7701af..1487eb4 100644
---- a/pcs/test/test_lib_commands_quorum.py
-+++ b/pcs/test/test_lib_commands_quorum.py
-@@ -1579,10 +1579,14 @@ class RemoveDeviceTest(TestCase, CmanMixin):
-         mock_remote_stop.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_success(
-+    @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
-+    @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
-+    def test_success_3nodes_sbd(
-         self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-         mock_get_corosync, mock_push_corosync
-     ):
-+        # nothing special needs to be done in regards of SBD if a cluster
-+        # consists of odd number of nodes
-         original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
-         no_device_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-@@ -1619,10 +1623,106 @@ class RemoveDeviceTest(TestCase, CmanMixin):
-         self.assertEqual(3, len(mock_remote_stop.mock_calls))
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_success_file(
-+    @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: False)
-+    @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: False)
-+    def test_success_2nodes_no_sbd(
-+        self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-+        mock_get_corosync, mock_push_corosync
-+    ):
-+        # cluster consists of two nodes, two_node must be set
-+        original_conf = open(rc("corosync-qdevice.conf")).read()
-+        no_device_conf = open(rc("corosync.conf")).read()
-+        mock_get_corosync.return_value = original_conf
-+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+
-+        lib.remove_device(lib_env)
-+
-+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-+        ac(
-+            mock_push_corosync.mock_calls[0][1][0].config.export(),
-+            no_device_conf
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_DISABLE_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_STOP_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+            ]
-+        )
-+        self.assertEqual(1, len(mock_remove_net.mock_calls))
-+        self.assertEqual(2, len(mock_remote_disable.mock_calls))
-+        self.assertEqual(2, len(mock_remote_stop.mock_calls))
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
-+    @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
-+    def test_success_2nodes_sbd(
-         self, mock_remote_stop, mock_remote_disable, mock_remove_net,
-         mock_get_corosync, mock_push_corosync
-     ):
-+        # cluster consists of two nodes, but SBD is in use
-+        # auto tie breaker must be enabled
-+        original_conf = open(rc("corosync-qdevice.conf")).read()
-+        no_device_conf = open(rc("corosync.conf")).read().replace(
-+            "two_node: 1",
-+            "auto_tie_breaker: 1"
-+        )
-+        mock_get_corosync.return_value = original_conf
-+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+
-+        lib.remove_device(lib_env)
-+
-+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-+        ac(
-+            mock_push_corosync.mock_calls[0][1][0].config.export(),
-+            no_device_conf
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severity.WARNING,
-+                    report_codes.SBD_REQUIRES_ATB,
-+                    {}
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_DISABLE_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+                (
-+                    severity.INFO,
-+                    report_codes.SERVICE_STOP_STARTED,
-+                    {
-+                        "service": "corosync-qdevice",
-+                    }
-+                ),
-+            ]
-+        )
-+        self.assertEqual(1, len(mock_remove_net.mock_calls))
-+        self.assertEqual(2, len(mock_remote_disable.mock_calls))
-+        self.assertEqual(2, len(mock_remote_stop.mock_calls))
-+
-+    @mock.patch("pcs.lib.sbd.atb_has_to_be_enabled")
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_success_file(
-+        self, mock_atb_check, mock_remote_stop, mock_remote_disable,
-+        mock_remove_net, mock_get_corosync, mock_push_corosync
-+    ):
-         original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
-         no_device_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-@@ -1643,6 +1743,7 @@ class RemoveDeviceTest(TestCase, CmanMixin):
-         mock_remove_net.assert_not_called()
-         mock_remote_disable.assert_not_called()
-         mock_remote_stop.assert_not_called()
-+        mock_atb_check.assert_not_called()
- 
- 
- @mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy")
-diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
-index 3173195..f03d78b 100644
---- a/pcs/test/test_lib_corosync_live.py
-+++ b/pcs/test/test_lib_corosync_live.py
-@@ -69,9 +69,10 @@ class ReloadConfigTest(TestCase):
- 
-     def test_success(self):
-         cmd_retval = 0
--        cmd_output = "cmd output"
-+        cmd_stdout = "cmd output"
-+        cmd_stderr = ""
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (cmd_output, cmd_retval)
-+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
- 
-         lib.reload_config(mock_runner)
- 
-@@ -81,9 +82,10 @@ class ReloadConfigTest(TestCase):
- 
-     def test_error(self):
-         cmd_retval = 1
--        cmd_output = "cmd output"
-+        cmd_stdout = "cmd output"
-+        cmd_stderr = "cmd error"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (cmd_output, cmd_retval)
-+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
- 
-         assert_raise_library_error(
-             lambda: lib.reload_config(mock_runner),
-@@ -91,7 +93,7 @@ class ReloadConfigTest(TestCase):
-                 severity.ERROR,
-                 report_codes.COROSYNC_CONFIG_RELOAD_ERROR,
-                 {
--                    "reason": cmd_output,
-+                    "reason": "\n".join([cmd_stderr, cmd_stdout]),
-                 }
-             )
-         )
-@@ -107,7 +109,7 @@ class GetQuorumStatusTextTest(TestCase):
-         self.quorum_tool = "/usr/sbin/corosync-quorumtool"
- 
-     def test_success(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.get_quorum_status_text(self.mock_runner)
-@@ -117,7 +119,7 @@ class GetQuorumStatusTextTest(TestCase):
-         ])
- 
-     def test_success_with_retval_1(self):
--        self.mock_runner.run.return_value = ("status info", 1)
-+        self.mock_runner.run.return_value = ("status info", "", 1)
-         self.assertEqual(
-             "status info",
-             lib.get_quorum_status_text(self.mock_runner)
-@@ -127,7 +129,7 @@ class GetQuorumStatusTextTest(TestCase):
-         ])
- 
-     def test_error(self):
--        self.mock_runner.run.return_value = ("status error", 2)
-+        self.mock_runner.run.return_value = ("some info", "status error", 2)
-         assert_raise_library_error(
-             lambda: lib.get_quorum_status_text(self.mock_runner),
-             (
-@@ -152,9 +154,10 @@ class SetExpectedVotesTest(TestCase):
- 
-     def test_success(self):
-         cmd_retval = 0
--        cmd_output = "cmd output"
-+        cmd_stdout = "cmd output"
-+        cmd_stderr = ""
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (cmd_output, cmd_retval)
-+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
- 
-         lib.set_expected_votes(mock_runner, 3)
- 
-@@ -164,9 +167,10 @@ class SetExpectedVotesTest(TestCase):
- 
-     def test_error(self):
-         cmd_retval = 1
--        cmd_output = "cmd output"
-+        cmd_stdout = "cmd output"
-+        cmd_stderr = "cmd stderr"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (cmd_output, cmd_retval)
-+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
- 
-         assert_raise_library_error(
-             lambda: lib.set_expected_votes(mock_runner, 3),
-@@ -174,7 +178,7 @@ class SetExpectedVotesTest(TestCase):
-                 severity.ERROR,
-                 report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR,
-                 {
--                    "reason": cmd_output,
-+                    "reason": cmd_stderr,
-                 }
-             )
-         )
-diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py
-index 0b5bd67..8c32c36 100644
---- a/pcs/test/test_lib_corosync_qdevice_client.py
-+++ b/pcs/test/test_lib_corosync_qdevice_client.py
-@@ -23,7 +23,7 @@ class GetStatusTextTest(TestCase):
-         self.qdevice_tool = "/usr/sbin/corosync-qdevice-tool"
- 
-     def test_success(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.get_status_text(self.mock_runner)
-@@ -33,7 +33,7 @@ class GetStatusTextTest(TestCase):
-         ])
- 
-     def test_success_verbose(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.get_status_text(self.mock_runner, True)
-@@ -43,14 +43,14 @@ class GetStatusTextTest(TestCase):
-         ])
- 
-     def test_error(self):
--        self.mock_runner.run.return_value = ("status error", 1)
-+        self.mock_runner.run.return_value = ("some info", "status error", 1)
-         assert_raise_library_error(
-             lambda: lib.get_status_text(self.mock_runner),
-             (
-                 severity.ERROR,
-                 report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
-                 {
--                    "reason": "status error",
-+                    "reason": "status error\nsome info",
-                 }
-             )
-         )
-diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
-index 340a8dc..21c526b 100644
---- a/pcs/test/test_lib_corosync_qdevice_net.py
-+++ b/pcs/test/test_lib_corosync_qdevice_net.py
-@@ -49,7 +49,7 @@ class QdeviceSetupTest(TestCase):
- 
-     def test_success(self, mock_is_dir_nonempty):
-         mock_is_dir_nonempty.return_value = False
--        self.mock_runner.run.return_value = ("initialized", 0)
-+        self.mock_runner.run.return_value = ("initialized", "", 0)
- 
-         lib.qdevice_setup(self.mock_runner)
- 
-@@ -73,7 +73,7 @@ class QdeviceSetupTest(TestCase):
- 
-     def test_init_tool_fail(self, mock_is_dir_nonempty):
-         mock_is_dir_nonempty.return_value = False
--        self.mock_runner.run.return_value = ("test error", 1)
-+        self.mock_runner.run.return_value = ("stdout", "test error", 1)
- 
-         assert_raise_library_error(
-             lambda: lib.qdevice_setup(self.mock_runner),
-@@ -82,7 +82,7 @@ class QdeviceSetupTest(TestCase):
-                 report_codes.QDEVICE_INITIALIZATION_ERROR,
-                 {
-                     "model": "net",
--                    "reason": "test error",
-+                    "reason": "test error\nstdout",
-                 }
-             )
-         )
-@@ -126,7 +126,7 @@ class QdeviceStatusGenericTest(TestCase):
-         self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
- 
-     def test_success(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.qdevice_status_generic_text(self.mock_runner)
-@@ -134,7 +134,7 @@ class QdeviceStatusGenericTest(TestCase):
-         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"])
- 
-     def test_success_verbose(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.qdevice_status_generic_text(self.mock_runner, True)
-@@ -142,7 +142,7 @@ class QdeviceStatusGenericTest(TestCase):
-         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s", "-v"])
- 
-     def test_error(self):
--        self.mock_runner.run.return_value = ("status error", 1)
-+        self.mock_runner.run.return_value = ("some info", "status error", 1)
-         assert_raise_library_error(
-             lambda: lib.qdevice_status_generic_text(self.mock_runner),
-             (
-@@ -150,7 +150,7 @@ class QdeviceStatusGenericTest(TestCase):
-                 report_codes.QDEVICE_GET_STATUS_ERROR,
-                 {
-                     "model": "net",
--                    "reason": "status error",
-+                    "reason": "status error\nsome info",
-                 }
-             )
-         )
-@@ -162,7 +162,7 @@ class QdeviceStatusClusterTest(TestCase):
-         self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
- 
-     def test_success(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.qdevice_status_cluster_text(self.mock_runner)
-@@ -170,7 +170,7 @@ class QdeviceStatusClusterTest(TestCase):
-         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
- 
-     def test_success_verbose(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.qdevice_status_cluster_text(self.mock_runner, verbose=True)
-@@ -178,7 +178,7 @@ class QdeviceStatusClusterTest(TestCase):
-         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l", "-v"])
- 
-     def test_success_cluster(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.qdevice_status_cluster_text(self.mock_runner, "cluster")
-@@ -188,7 +188,7 @@ class QdeviceStatusClusterTest(TestCase):
-         ])
- 
-     def test_success_cluster_verbose(self):
--        self.mock_runner.run.return_value = ("status info", 0)
-+        self.mock_runner.run.return_value = ("status info", "", 0)
-         self.assertEqual(
-             "status info",
-             lib.qdevice_status_cluster_text(self.mock_runner, "cluster", True)
-@@ -198,7 +198,7 @@ class QdeviceStatusClusterTest(TestCase):
-         ])
- 
-     def test_error(self):
--        self.mock_runner.run.return_value = ("status error", 1)
-+        self.mock_runner.run.return_value = ("some info", "status error", 1)
-         assert_raise_library_error(
-             lambda: lib.qdevice_status_cluster_text(self.mock_runner),
-             (
-@@ -206,13 +206,63 @@ class QdeviceStatusClusterTest(TestCase):
-                 report_codes.QDEVICE_GET_STATUS_ERROR,
-                 {
-                     "model": "net",
--                    "reason": "status error",
-+                    "reason": "status error\nsome info",
-                 }
-             )
-         )
-         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
- 
- 
-+class QdeviceConnectedClustersTest(TestCase):
-+    def test_empty_status(self):
-+        status = ""
-+        self.assertEqual(
-+            [],
-+            lib.qdevice_connected_clusters(status)
-+        )
-+
-+    def test_one_cluster(self):
-+        status = """\
-+Cluster "rhel72":
-+    Algorithm:          LMS
-+    Tie-breaker:        Node with lowest node ID
-+    Node ID 2:
-+        Client address:         ::ffff:192.168.122.122:59738
-+        Configured node list:   1, 2
-+        Membership node list:   1, 2
-+        Vote:                   ACK (ACK)
-+    Node ID 1:
-+        Client address:         ::ffff:192.168.122.121:43420
-+        Configured node list:   1, 2
-+        Membership node list:   1, 2
-+        Vote:                   ACK (ACK)
-+"""
-+        self.assertEqual(
-+            ["rhel72"],
-+            lib.qdevice_connected_clusters(status)
-+        )
-+
-+    def test_more_clusters(self):
-+        status = """\
-+Cluster "rhel72":
-+Cluster "rhel73":
-+"""
-+        self.assertEqual(
-+            ["rhel72", "rhel73"],
-+            lib.qdevice_connected_clusters(status)
-+        )
-+
-+    def test_invalid_status(self):
-+        status = """\
-+Cluster:
-+    Cluster "rhel72":
-+"""
-+        self.assertEqual(
-+            [],
-+            lib.qdevice_connected_clusters(status)
-+        )
-+
-+
- @mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
- @mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
- class QdeviceSignCertificateRequestTest(CertificateTestCase):
-@@ -222,7 +272,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
-     )
-     def test_success(self, mock_tmp_store, mock_get_cert):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output", 0)
-+        self.mock_runner.run.return_value = ("tool output", "", 0)
-         mock_get_cert.return_value = "new certificate".encode("utf-8")
- 
-         result = lib.qdevice_sign_certificate_request(
-@@ -293,7 +343,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
-     )
-     def test_sign_error(self, mock_tmp_store, mock_get_cert):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output error", 1)
-+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
- 
-         assert_raise_library_error(
-             lambda: lib.qdevice_sign_certificate_request(
-@@ -305,7 +355,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
-                 severity.ERROR,
-                 report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR,
-                 {
--                    "reason": "tool output error",
-+                    "reason": "tool output error\nstdout",
-                 }
-             )
-         )
-@@ -326,7 +376,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
-     )
-     def test_output_read_error(self, mock_tmp_store, mock_get_cert):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output", 0)
-+        self.mock_runner.run.return_value = ("tool output", "", 0)
-         mock_get_cert.side_effect = LibraryError
- 
-         self.assertRaises(
-@@ -399,7 +449,7 @@ class ClientSetupTest(TestCase):
- 
-     @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
-     def test_success(self, mock_destroy):
--        self.mock_runner.run.return_value = ("tool output", 0)
-+        self.mock_runner.run.return_value = ("tool output", "", 0)
- 
-         lib.client_setup(self.mock_runner, "certificate data".encode("utf-8"))
- 
-@@ -414,7 +464,7 @@ class ClientSetupTest(TestCase):
- 
-     @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
-     def test_init_error(self, mock_destroy):
--        self.mock_runner.run.return_value = ("tool output error", 1)
-+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
- 
-         assert_raise_library_error(
-             lambda: lib.client_setup(
-@@ -426,7 +476,7 @@ class ClientSetupTest(TestCase):
-                 report_codes.QDEVICE_INITIALIZATION_ERROR,
-                 {
-                     "model": "net",
--                    "reason": "tool output error",
-+                    "reason": "tool output error\nstdout",
-                 }
-             )
-         )
-@@ -448,7 +498,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase):
-         lambda: True
-     )
-     def test_success(self, mock_get_cert):
--        self.mock_runner.run.return_value = ("tool output", 0)
-+        self.mock_runner.run.return_value = ("tool output", "", 0)
-         mock_get_cert.return_value = "new certificate".encode("utf-8")
- 
-         result = lib.client_generate_certificate_request(
-@@ -492,7 +542,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase):
-         lambda: True
-     )
-     def test_tool_error(self, mock_get_cert):
--        self.mock_runner.run.return_value = ("tool output error", 1)
-+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
- 
-         assert_raise_library_error(
-             lambda: lib.client_generate_certificate_request(
-@@ -504,7 +554,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase):
-                 report_codes.QDEVICE_INITIALIZATION_ERROR,
-                 {
-                     "model": "net",
--                    "reason": "tool output error",
-+                    "reason": "tool output error\nstdout",
-                 }
-             )
-         )
-@@ -523,7 +573,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
-     )
-     def test_success(self, mock_tmp_store, mock_get_cert):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output", 0)
-+        self.mock_runner.run.return_value = ("tool output", "", 0)
-         mock_get_cert.return_value = "new certificate".encode("utf-8")
- 
-         result = lib.client_cert_request_to_pk12(
-@@ -594,7 +644,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
-     )
-     def test_transform_error(self, mock_tmp_store, mock_get_cert):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output error", 1)
-+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
- 
-         assert_raise_library_error(
-             lambda: lib.client_cert_request_to_pk12(
-@@ -605,7 +655,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
-                 severity.ERROR,
-                 report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-                 {
--                    "reason": "tool output error",
-+                    "reason": "tool output error\nstdout",
-                 }
-             )
-         )
-@@ -625,7 +675,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
-     )
-     def test_output_read_error(self, mock_tmp_store, mock_get_cert):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output", 0)
-+        self.mock_runner.run.return_value = ("tool output", "", 0)
-         mock_get_cert.side_effect = LibraryError
- 
-         self.assertRaises(
-@@ -657,7 +707,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
-     )
-     def test_success(self, mock_tmp_store):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output", 0)
-+        self.mock_runner.run.return_value = ("tool output", "", 0)
- 
-         lib.client_import_certificate_and_key(
-             self.mock_runner,
-@@ -721,7 +771,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
-     )
-     def test_import_error(self, mock_tmp_store):
-         mock_tmp_store.return_value = self.mock_tmpfile
--        self.mock_runner.run.return_value = ("tool output error", 1)
-+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
- 
-         assert_raise_library_error(
-             lambda: lib.client_import_certificate_and_key(
-@@ -732,7 +782,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
-                 severity.ERROR,
-                 report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-                 {
--                    "reason": "tool output error",
-+                    "reason": "tool output error\nstdout",
-                 }
-             )
-         )
-diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
-index aafbe85..d37747a 100644
---- a/pcs/test/test_lib_external.py
-+++ b/pcs/test/test_lib_external.py
-@@ -57,19 +57,23 @@ class CommandRunnerTest(TestCase):
-         self.assertEqual(filtered_kwargs, kwargs)
- 
-     def test_basic(self, mock_popen):
--        expected_output = "expected output"
-+        expected_stdout = "expected stdout"
-+        expected_stderr = "expected stderr"
-         expected_retval = 123
-         command = ["a_command"]
-         command_str = "a_command"
-         mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
--        mock_process.communicate.return_value = (expected_output, "dummy")
-+        mock_process.communicate.return_value = (
-+            expected_stdout, expected_stderr
-+        )
-         mock_process.returncode = expected_retval
-         mock_popen.return_value = mock_process
- 
-         runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
--        real_output, real_retval = runner.run(command)
-+        real_stdout, real_stderr, real_retval = runner.run(command)
- 
--        self.assertEqual(real_output, expected_output)
-+        self.assertEqual(real_stdout, expected_stdout)
-+        self.assertEqual(real_stderr, expected_stderr)
-         self.assertEqual(real_retval, expected_retval)
-         mock_process.communicate.assert_called_once_with(None)
-         self.assert_popen_called_with(
-@@ -82,9 +86,14 @@ class CommandRunnerTest(TestCase):
-             mock.call("""\
- Finished running: {0}
- Return value: {1}
----Debug Output Start--
-+--Debug Stdout Start--
- {2}
----Debug Output End--""".format(command_str, expected_retval, expected_output))
-+--Debug Stdout End--
-+--Debug Stderr Start--
-+{3}
-+--Debug Stderr End--""".format(
-+                command_str, expected_retval, expected_stdout, expected_stderr
-+            ))
-         ]
-         self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
-         self.mock_logger.debug.assert_has_calls(logger_calls)
-@@ -105,19 +114,23 @@ Return value: {1}
-                     {
-                         "command": command_str,
-                         "return_value": expected_retval,
--                        "stdout": expected_output,
-+                        "stdout": expected_stdout,
-+                        "stderr": expected_stderr,
-                     }
-                 )
-             ]
-         )
- 
-     def test_env(self, mock_popen):
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         expected_retval = 123
-         command = ["a_command"]
-         command_str = "a_command"
-         mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
--        mock_process.communicate.return_value = (expected_output, "dummy")
-+        mock_process.communicate.return_value = (
-+            expected_stdout, expected_stderr
-+        )
-         mock_process.returncode = expected_retval
-         mock_popen.return_value = mock_process
- 
-@@ -126,12 +139,13 @@ Return value: {1}
-             self.mock_reporter,
-             {"a": "a", "b": "b"}
-         )
--        real_output, real_retval = runner.run(
-+        real_stdout, real_stderr, real_retval = runner.run(
-             command,
-             env_extend={"b": "B", "c": "C"}
-         )
- 
--        self.assertEqual(real_output, expected_output)
-+        self.assertEqual(real_stdout, expected_stdout)
-+        self.assertEqual(real_stderr, expected_stderr)
-         self.assertEqual(real_retval, expected_retval)
-         mock_process.communicate.assert_called_once_with(None)
-         self.assert_popen_called_with(
-@@ -144,9 +158,14 @@ Return value: {1}
-             mock.call("""\
- Finished running: {0}
- Return value: {1}
----Debug Output Start--
-+--Debug Stdout Start--
- {2}
----Debug Output End--""".format(command_str, expected_retval, expected_output))
-+--Debug Stdout End--
-+--Debug Stderr Start--
-+{3}
-+--Debug Stderr End--""".format(
-+                command_str, expected_retval, expected_stdout, expected_stderr
-+            ))
-         ]
-         self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
-         self.mock_logger.debug.assert_has_calls(logger_calls)
-@@ -167,27 +186,34 @@ Return value: {1}
-                     {
-                         "command": command_str,
-                         "return_value": expected_retval,
--                        "stdout": expected_output,
-+                        "stdout": expected_stdout,
-+                        "stderr": expected_stderr,
-                     }
-                 )
-             ]
-         )
- 
-     def test_stdin(self, mock_popen):
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         expected_retval = 123
-         command = ["a_command"]
-         command_str = "a_command"
-         stdin = "stdin string"
-         mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
--        mock_process.communicate.return_value = (expected_output, "dummy")
-+        mock_process.communicate.return_value = (
-+            expected_stdout, expected_stderr
-+        )
-         mock_process.returncode = expected_retval
-         mock_popen.return_value = mock_process
- 
-         runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
--        real_output, real_retval = runner.run(command, stdin_string=stdin)
-+        real_stdout, real_stderr, real_retval = runner.run(
-+            command, stdin_string=stdin
-+        )
- 
--        self.assertEqual(real_output, expected_output)
-+        self.assertEqual(real_stdout, expected_stdout)
-+        self.assertEqual(real_stderr, expected_stderr)
-         self.assertEqual(real_retval, expected_retval)
-         mock_process.communicate.assert_called_once_with(stdin)
-         self.assert_popen_called_with(
-@@ -204,9 +230,14 @@ Running: {0}
-             mock.call("""\
- Finished running: {0}
- Return value: {1}
----Debug Output Start--
-+--Debug Stdout Start--
- {2}
----Debug Output End--""".format(command_str, expected_retval, expected_output))
-+--Debug Stdout End--
-+--Debug Stderr Start--
-+{3}
-+--Debug Stderr End--""".format(
-+                command_str, expected_retval, expected_stdout, expected_stderr
-+            ))
-         ]
-         self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
-         self.mock_logger.debug.assert_has_calls(logger_calls)
-@@ -227,7 +258,8 @@ Return value: {1}
-                     {
-                         "command": command_str,
-                         "return_value": expected_retval,
--                        "stdout": expected_output,
-+                        "stdout": expected_stdout,
-+                        "stderr": expected_stderr,
-                     }
-                 )
-             ]
-@@ -957,7 +989,7 @@ class ParallelCommunicationHelperTest(TestCase):
- class IsCmanClusterTest(TestCase):
-     def template_test(self, is_cman, corosync_output, corosync_retval=0):
-         mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
--        mock_runner.run.return_value = (corosync_output, corosync_retval)
-+        mock_runner.run.return_value = (corosync_output, "", corosync_retval)
-         self.assertEqual(is_cman, lib.is_cman_cluster(mock_runner))
-         mock_runner.run.assert_called_once_with([
-             os.path.join(settings.corosync_binaries, "corosync"),
-@@ -1021,7 +1053,7 @@ class DisableServiceTest(TestCase):
-     def test_systemctl(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "Removed symlink", 0)
-         lib.disable_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "disable", self.service + ".service"]
-@@ -1030,7 +1062,7 @@ class DisableServiceTest(TestCase):
-     def test_systemctl_failed(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "Failed", 1)
-         self.assertRaises(
-             lib.DisableServiceError,
-             lambda: lib.disable_service(self.mock_runner, self.service)
-@@ -1042,7 +1074,7 @@ class DisableServiceTest(TestCase):
-     def test_not_systemctl(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.disable_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["chkconfig", self.service, "off"]
-@@ -1051,7 +1083,7 @@ class DisableServiceTest(TestCase):
-     def test_not_systemctl_failed(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "error", 1)
-         self.assertRaises(
-             lib.DisableServiceError,
-             lambda: lib.disable_service(self.mock_runner, self.service)
-@@ -1079,7 +1111,7 @@ class DisableServiceTest(TestCase):
-     def test_instance_systemctl(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "Removed symlink", 0)
-         lib.disable_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with([
-             "systemctl",
-@@ -1090,7 +1122,7 @@ class DisableServiceTest(TestCase):
-     def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.disable_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with(
-             ["chkconfig", self.service, "off"]
-@@ -1104,7 +1136,7 @@ class EnableServiceTest(TestCase):
- 
-     def test_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "Created symlink", 0)
-         lib.enable_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "enable", self.service + ".service"]
-@@ -1112,7 +1144,7 @@ class EnableServiceTest(TestCase):
- 
-     def test_systemctl_failed(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "Failed", 1)
-         self.assertRaises(
-             lib.EnableServiceError,
-             lambda: lib.enable_service(self.mock_runner, self.service)
-@@ -1123,7 +1155,7 @@ class EnableServiceTest(TestCase):
- 
-     def test_not_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.enable_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["chkconfig", self.service, "on"]
-@@ -1131,7 +1163,7 @@ class EnableServiceTest(TestCase):
- 
-     def test_not_systemctl_failed(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "error", 1)
-         self.assertRaises(
-             lib.EnableServiceError,
-             lambda: lib.enable_service(self.mock_runner, self.service)
-@@ -1142,7 +1174,7 @@ class EnableServiceTest(TestCase):
- 
-     def test_instance_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "Created symlink", 0)
-         lib.enable_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with([
-             "systemctl",
-@@ -1152,7 +1184,7 @@ class EnableServiceTest(TestCase):
- 
-     def test_instance_not_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.enable_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with(
-             ["chkconfig", self.service, "on"]
-@@ -1167,7 +1199,7 @@ class StartServiceTest(TestCase):
- 
-     def test_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.start_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "start", self.service + ".service"]
-@@ -1175,7 +1207,7 @@ class StartServiceTest(TestCase):
- 
-     def test_systemctl_failed(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "Failed", 1)
-         self.assertRaises(
-             lib.StartServiceError,
-             lambda: lib.start_service(self.mock_runner, self.service)
-@@ -1186,7 +1218,7 @@ class StartServiceTest(TestCase):
- 
-     def test_not_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("Starting...", "", 0)
-         lib.start_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["service", self.service, "start"]
-@@ -1194,7 +1226,7 @@ class StartServiceTest(TestCase):
- 
-     def test_not_systemctl_failed(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "unrecognized", 1)
-         self.assertRaises(
-             lib.StartServiceError,
-             lambda: lib.start_service(self.mock_runner, self.service)
-@@ -1205,7 +1237,7 @@ class StartServiceTest(TestCase):
- 
-     def test_instance_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.start_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with([
-             "systemctl", "start", "{0}@{1}.service".format(self.service, "test")
-@@ -1213,7 +1245,7 @@ class StartServiceTest(TestCase):
- 
-     def test_instance_not_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("Starting...", "", 0)
-         lib.start_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with(
-             ["service", self.service, "start"]
-@@ -1228,7 +1260,7 @@ class StopServiceTest(TestCase):
- 
-     def test_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.stop_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "stop", self.service + ".service"]
-@@ -1236,7 +1268,7 @@ class StopServiceTest(TestCase):
- 
-     def test_systemctl_failed(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "Failed", 1)
-         self.assertRaises(
-             lib.StopServiceError,
-             lambda: lib.stop_service(self.mock_runner, self.service)
-@@ -1247,7 +1279,7 @@ class StopServiceTest(TestCase):
- 
-     def test_not_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("Stopping...", "", 0)
-         lib.stop_service(self.mock_runner, self.service)
-         self.mock_runner.run.assert_called_once_with(
-             ["service", self.service, "stop"]
-@@ -1255,7 +1287,7 @@ class StopServiceTest(TestCase):
- 
-     def test_not_systemctl_failed(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "unrecognized", 1)
-         self.assertRaises(
-             lib.StopServiceError,
-             lambda: lib.stop_service(self.mock_runner, self.service)
-@@ -1266,7 +1298,7 @@ class StopServiceTest(TestCase):
- 
-     def test_instance_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.stop_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with([
-             "systemctl", "stop", "{0}@{1}.service".format(self.service, "test")
-@@ -1274,7 +1306,7 @@ class StopServiceTest(TestCase):
- 
-     def test_instance_not_systemctl(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("Stopping...", "", 0)
-         lib.stop_service(self.mock_runner, self.service, instance="test")
-         self.mock_runner.run.assert_called_once_with(
-             ["service", self.service, "stop"]
-@@ -1287,14 +1319,14 @@ class KillServicesTest(TestCase):
-         self.services = ["service1", "service2"]
- 
-     def test_success(self):
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         lib.kill_services(self.mock_runner, self.services)
-         self.mock_runner.run.assert_called_once_with(
-             ["killall", "--quiet", "--signal", "9", "--"] + self.services
-         )
- 
-     def test_failed(self):
--        self.mock_runner.run.return_value = ("error", 1)
-+        self.mock_runner.run.return_value = ("", "error", 1)
-         self.assertRaises(
-             lib.KillServicesError,
-             lambda: lib.kill_services(self.mock_runner, self.services)
-@@ -1304,7 +1336,7 @@ class KillServicesTest(TestCase):
-         )
- 
-     def test_service_not_running(self):
--        self.mock_runner.run.return_value = ("", 1)
-+        self.mock_runner.run.return_value = ("", "", 1)
-         lib.kill_services(self.mock_runner, self.services)
-         self.mock_runner.run.assert_called_once_with(
-             ["killall", "--quiet", "--signal", "9", "--"] + self.services
-@@ -1348,7 +1380,7 @@ class IsServiceEnabledTest(TestCase):
- 
-     def test_systemctl_enabled(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("enabled\n", 0)
-+        self.mock_runner.run.return_value = ("enabled\n", "", 0)
-         self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "is-enabled", self.service + ".service"]
-@@ -1356,7 +1388,7 @@ class IsServiceEnabledTest(TestCase):
- 
-     def test_systemctl_disabled(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("disabled\n", 2)
-+        self.mock_runner.run.return_value = ("disabled\n", "", 2)
-         self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "is-enabled", self.service + ".service"]
-@@ -1364,7 +1396,7 @@ class IsServiceEnabledTest(TestCase):
- 
-     def test_not_systemctl_enabled(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("", "", 0)
-         self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["chkconfig", self.service]
-@@ -1372,7 +1404,7 @@ class IsServiceEnabledTest(TestCase):
- 
-     def test_not_systemctl_disabled(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 3)
-+        self.mock_runner.run.return_value = ("", "", 3)
-         self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["chkconfig", self.service]
-@@ -1387,7 +1419,7 @@ class IsServiceRunningTest(TestCase):
- 
-     def test_systemctl_running(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("active", "", 0)
-         self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "is-active", self.service + ".service"]
-@@ -1395,7 +1427,7 @@ class IsServiceRunningTest(TestCase):
- 
-     def test_systemctl_not_running(self, mock_systemctl):
-         mock_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("", 2)
-+        self.mock_runner.run.return_value = ("inactive", "", 2)
-         self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["systemctl", "is-active", self.service + ".service"]
-@@ -1403,7 +1435,7 @@ class IsServiceRunningTest(TestCase):
- 
-     def test_not_systemctl_running(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-+        self.mock_runner.run.return_value = ("is running", "", 0)
-         self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["service", self.service, "status"]
-@@ -1411,7 +1443,7 @@ class IsServiceRunningTest(TestCase):
- 
-     def test_not_systemctl_not_running(self, mock_systemctl):
-         mock_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 3)
-+        self.mock_runner.run.return_value = ("is stopped", "", 3)
-         self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
-         self.mock_runner.run.assert_called_once_with(
-             ["service", self.service, "status"]
-@@ -1484,7 +1516,7 @@ sbd.service                                 enabled
- pacemaker.service                           enabled
- 
- 3 unit files listed.
--""", 0)
-+""", "", 0)
-         self.assertEqual(
-             lib.get_systemd_services(self.mock_runner),
-             ["pcsd", "sbd", "pacemaker"]
-@@ -1496,7 +1528,7 @@ pacemaker.service                           enabled
- 
-     def test_failed(self, mock_is_systemctl):
-         mock_is_systemctl.return_value = True
--        self.mock_runner.run.return_value = ("failed", 1)
-+        self.mock_runner.run.return_value = ("stdout", "failed", 1)
-         self.assertEqual(lib.get_systemd_services(self.mock_runner), [])
-         self.assertEqual(mock_is_systemctl.call_count, 1)
-         self.mock_runner.run.assert_called_once_with(
-@@ -1505,10 +1537,9 @@ pacemaker.service                           enabled
- 
-     def test_not_systemd(self, mock_is_systemctl):
-         mock_is_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("", 0)
-         self.assertEqual(lib.get_systemd_services(self.mock_runner), [])
--        self.assertEqual(mock_is_systemctl.call_count, 1)
--        self.assertEqual(self.mock_runner.call_count, 0)
-+        mock_is_systemctl.assert_called_once_with()
-+        self.mock_runner.assert_not_called()
- 
- 
- @mock.patch("pcs.lib.external.is_systemctl")
-@@ -1522,24 +1553,20 @@ class GetNonSystemdServicesTest(TestCase):
- pcsd           	0:off	1:off	2:on	3:on	4:on	5:on	6:off
- sbd            	0:off	1:on	2:on	3:on	4:on	5:on	6:off
- pacemaker      	0:off	1:off	2:off	3:off	4:off	5:off	6:off
--""", 0)
-+""", "", 0)
-         self.assertEqual(
-             lib.get_non_systemd_services(self.mock_runner),
-             ["pcsd", "sbd", "pacemaker"]
-         )
-         self.assertEqual(mock_is_systemctl.call_count, 1)
--        self.mock_runner.run.assert_called_once_with(
--            ["chkconfig"], ignore_stderr=True
--        )
-+        self.mock_runner.run.assert_called_once_with(["chkconfig"])
- 
-     def test_failed(self, mock_is_systemctl):
-         mock_is_systemctl.return_value = False
--        self.mock_runner.run.return_value = ("failed", 1)
-+        self.mock_runner.run.return_value = ("stdout", "failed", 1)
-         self.assertEqual(lib.get_non_systemd_services(self.mock_runner), [])
-         self.assertEqual(mock_is_systemctl.call_count, 1)
--        self.mock_runner.run.assert_called_once_with(
--            ["chkconfig"], ignore_stderr=True
--        )
-+        self.mock_runner.run.assert_called_once_with(["chkconfig"])
- 
-     def test_systemd(self, mock_is_systemctl):
-         mock_is_systemctl.return_value = True
-diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py
-index c475db6..7ca7b77 100644
---- a/pcs/test/test_lib_pacemaker.py
-+++ b/pcs/test/test_lib_pacemaker.py
-@@ -64,21 +64,31 @@ class LibraryPacemakerNodeStatusTest(LibraryPacemakerTest):
- 
- class GetClusterStatusXmlTest(LibraryPacemakerTest):
-     def test_success(self):
--        expected_xml = "<xml />"
-+        expected_stdout = "<xml />"
-+        expected_stderr = ""
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_xml, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         real_xml = lib.get_cluster_status_xml(mock_runner)
- 
-         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
--        self.assertEqual(expected_xml, real_xml)
-+        self.assertEqual(expected_stdout, real_xml)
- 
-     def test_error(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.get_cluster_status_xml(mock_runner),
-@@ -86,8 +96,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
-                 Severity.ERROR,
-                 report_codes.CRM_MON_ERROR,
-                 {
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr + "\n" + expected_stdout,
-                 }
-             )
-         )
-@@ -96,23 +105,33 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
- 
- class GetCibXmlTest(LibraryPacemakerTest):
-     def test_success(self):
--        expected_xml = "<xml />"
-+        expected_stdout = "<xml />"
-+        expected_stderr = ""
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_xml, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         real_xml = lib.get_cib_xml(mock_runner)
- 
-         mock_runner.run.assert_called_once_with(
-             [self.path("cibadmin"), "--local", "--query"]
-         )
--        self.assertEqual(expected_xml, real_xml)
-+        self.assertEqual(expected_stdout, real_xml)
- 
-     def test_error(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.get_cib_xml(mock_runner),
-@@ -120,8 +139,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
-                 Severity.ERROR,
-                 report_codes.CIB_LOAD_ERROR,
-                 {
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr + "\n" + expected_stdout,
-                 }
-             )
-         )
-@@ -131,11 +149,16 @@ class GetCibXmlTest(LibraryPacemakerTest):
-         )
- 
-     def test_success_scope(self):
--        expected_xml = "<xml />"
-+        expected_stdout = "<xml />"
-+        expected_stderr = ""
-         expected_retval = 0
-         scope = "test_scope"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_xml, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         real_xml = lib.get_cib_xml(mock_runner, scope)
- 
-@@ -145,14 +168,19 @@ class GetCibXmlTest(LibraryPacemakerTest):
-                 "--local", "--query", "--scope={0}".format(scope)
-             ]
-         )
--        self.assertEqual(expected_xml, real_xml)
-+        self.assertEqual(expected_stdout, real_xml)
- 
-     def test_scope_error(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 6
-         scope = "test_scope"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.get_cib_xml(mock_runner, scope=scope),
-@@ -161,8 +189,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
-                 report_codes.CIB_LOAD_ERROR_SCOPE_MISSING,
-                 {
-                     "scope": scope,
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr + "\n" + expected_stdout,
-                 }
-             )
-         )
-@@ -194,10 +221,15 @@ class GetCibTest(LibraryPacemakerTest):
- class ReplaceCibConfigurationTest(LibraryPacemakerTest):
-     def test_success(self):
-         xml = "<xml/>"
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = ""
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         lib.replace_cib_configuration(
-             mock_runner,
-@@ -214,10 +246,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
- 
-     def test_cib_upgraded(self):
-         xml = "<xml/>"
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = ""
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         lib.replace_cib_configuration(
-             mock_runner, XmlManipulation.from_str(xml).tree, True
-@@ -230,10 +267,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
- 
-     def test_error(self):
-         xml = "<xml/>"
--        expected_error = "expected error"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.replace_cib_configuration(
-@@ -245,8 +287,8 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
-                 Severity.ERROR,
-                 report_codes.CIB_PUSH_ERROR,
-                 {
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr,
-+                    "pushed_cib": expected_stdout,
-                 }
-             )
-         )
-@@ -261,10 +303,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
- 
- class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
-     def test_offline(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         self.assertEqual(
-             {"offline": True},
-@@ -273,10 +320,15 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
-         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
- 
-     def test_invalid_status(self):
--        expected_xml = "some error"
-+        expected_stdout = "invalid xml"
-+        expected_stderr = ""
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_xml, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.get_local_node_status(mock_runner),
-@@ -310,9 +362,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
-             ),
-         ]
-         return_value_list = [
--            (str(self.status), 0),
--            (node_id, 0),
--            (node_name, 0)
-+            (str(self.status), "", 0),
-+            (node_id, "", 0),
-+            (node_name, "", 0)
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -339,9 +391,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
-             ),
-         ]
-         return_value_list = [
--            (str(self.status), 0),
--            (node_id, 0),
--            (node_name_bad, 0)
-+            (str(self.status), "", 0),
-+            (node_id, "", 0),
-+            (node_name_bad, "", 0)
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -370,8 +422,8 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
-             mock.call([self.path("crm_node"), "--cluster-id"]),
-         ]
-         return_value_list = [
--            (str(self.status), 0),
--            ("some error", 1),
-+            (str(self.status), "", 0),
-+            ("", "some error", 1),
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -403,9 +455,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
-             ),
-         ]
-         return_value_list = [
--            (str(self.status), 0),
--            (node_id, 0),
--            ("some error", 1),
-+            (str(self.status), "", 0),
-+            (node_id, "", 0),
-+            ("", "some error", 1),
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -437,9 +489,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
-             ),
-         ]
-         return_value_list = [
--            (str(self.status), 0),
--            (node_id, 0),
--            ("(null)", 0),
-+            (str(self.status), "", 0),
-+            (node_id, "", 0),
-+            ("(null)", "", 0),
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -465,15 +517,16 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-         return str(XmlManipulation(doc))
- 
-     def test_basic(self):
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-         call_list = [
-             mock.call(self.crm_mon_cmd()),
-             mock.call([self.path("crm_resource"), "--cleanup"]),
-         ]
-         return_value_list = [
--            (self.fixture_status_xml(1, 1), 0),
--            (expected_output, 0),
-+            (self.fixture_status_xml(1, 1), "", 0),
-+            (expected_stdout, expected_stderr, 0),
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -482,11 +535,18 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-         self.assertEqual(len(return_value_list), len(call_list))
-         self.assertEqual(len(return_value_list), mock_runner.run.call_count)
-         mock_runner.run.assert_has_calls(call_list)
--        self.assertEqual(expected_output, real_output)
-+        self.assertEqual(
-+            expected_stdout + "\n" + expected_stderr,
-+            real_output
-+        )
- 
-     def test_threshold_exceeded(self):
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (self.fixture_status_xml(1000, 1000), 0)
-+        mock_runner.run.return_value = (
-+            self.fixture_status_xml(1000, 1000),
-+            "",
-+            0
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.resource_cleanup(mock_runner),
-@@ -501,49 +561,62 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
- 
-     def test_forced(self):
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, 0)
-+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
- 
-         real_output = lib.resource_cleanup(mock_runner, force=True)
- 
-         mock_runner.run.assert_called_once_with(
-             [self.path("crm_resource"), "--cleanup"]
-         )
--        self.assertEqual(expected_output, real_output)
-+        self.assertEqual(
-+            expected_stdout + "\n" + expected_stderr,
-+            real_output
-+        )
- 
-     def test_resource(self):
-         resource = "test_resource"
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, 0)
-+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
- 
-         real_output = lib.resource_cleanup(mock_runner, resource=resource)
- 
-         mock_runner.run.assert_called_once_with(
-             [self.path("crm_resource"), "--cleanup", "--resource", resource]
-         )
--        self.assertEqual(expected_output, real_output)
-+        self.assertEqual(
-+            expected_stdout + "\n" + expected_stderr,
-+            real_output
-+        )
- 
-     def test_node(self):
-         node = "test_node"
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, 0)
-+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
- 
-         real_output = lib.resource_cleanup(mock_runner, node=node)
- 
-         mock_runner.run.assert_called_once_with(
-             [self.path("crm_resource"), "--cleanup", "--node", node]
-         )
--        self.assertEqual(expected_output, real_output)
-+        self.assertEqual(
-+            expected_stdout + "\n" + expected_stderr,
-+            real_output
-+        )
- 
-     def test_node_and_resource(self):
-         node = "test_node"
-         resource = "test_resource"
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, 0)
-+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
- 
-         real_output = lib.resource_cleanup(
-             mock_runner, resource=resource, node=node
-@@ -555,13 +628,21 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-                 "--cleanup", "--resource", resource, "--node", node
-             ]
-         )
--        self.assertEqual(expected_output, real_output)
-+        self.assertEqual(
-+            expected_stdout + "\n" + expected_stderr,
-+            real_output
-+        )
- 
-     def test_error_state(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.resource_cleanup(mock_runner),
-@@ -569,8 +650,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-                 Severity.ERROR,
-                 report_codes.CRM_MON_ERROR,
-                 {
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr + "\n" + expected_stdout,
-                 }
-             )
-         )
-@@ -578,7 +658,8 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
- 
-     def test_error_cleanup(self):
--        expected_error = "expected error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-         call_list = [
-@@ -586,8 +667,8 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-             mock.call([self.path("crm_resource"), "--cleanup"]),
-         ]
-         return_value_list = [
--            (self.fixture_status_xml(1, 1), 0),
--            (expected_error, expected_retval),
-+            (self.fixture_status_xml(1, 1), "", 0),
-+            (expected_stdout, expected_stderr, expected_retval),
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -597,8 +678,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
-                 Severity.ERROR,
-                 report_codes.RESOURCE_CLEANUP_ERROR,
-                 {
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr + "\n" + expected_stdout,
-                 }
-             )
-         )
-@@ -609,10 +689,33 @@ class ResourceCleanupTest(LibraryPacemakerTest):
- 
- class ResourcesWaitingTest(LibraryPacemakerTest):
-     def test_has_support(self):
--        expected_output = "something --wait something else"
-+        expected_stdout = ""
-+        expected_stderr = "something --wait something else"
-+        expected_retval = 1
-+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
-+
-+        self.assertTrue(
-+            lib.has_resource_wait_support(mock_runner)
-+        )
-+        mock_runner.run.assert_called_once_with(
-+            [self.path("crm_resource"), "-?"]
-+        )
-+
-+    def test_has_support_stdout(self):
-+        expected_stdout = "something --wait something else"
-+        expected_stderr = ""
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         self.assertTrue(
-             lib.has_resource_wait_support(mock_runner)
-@@ -622,10 +725,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
-         )
- 
-     def test_doesnt_have_support(self):
--        expected_output = "something something else"
-+        expected_stdout = "something something else"
-+        expected_stderr = "something something else"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         self.assertFalse(
-             lib.has_resource_wait_support(mock_runner)
-@@ -652,10 +760,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
-         )
- 
-     def test_wait_success(self):
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         self.assertEqual(None, lib.wait_for_resources(mock_runner))
- 
-@@ -664,11 +777,16 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
-         )
- 
-     def test_wait_timeout_success(self):
--        expected_output = "expected output"
-+        expected_stdout = "expected output"
-+        expected_stderr = "expected stderr"
-         expected_retval = 0
-         timeout = 10
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_output, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         self.assertEqual(None, lib.wait_for_resources(mock_runner, timeout))
- 
-@@ -680,10 +798,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
-         )
- 
-     def test_wait_error(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.wait_for_resources(mock_runner),
-@@ -691,8 +814,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
-                 Severity.ERROR,
-                 report_codes.RESOURCE_WAIT_ERROR,
-                 {
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr + "\n" + expected_stdout,
-                 }
-             )
-         )
-@@ -702,10 +824,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
-         )
- 
-     def test_wait_error_timeout(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 62
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.wait_for_resources(mock_runner),
-@@ -713,8 +840,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
-                 Severity.ERROR,
-                 report_codes.RESOURCE_WAIT_TIMED_OUT,
-                 {
--                    "return_value": expected_retval,
--                    "stdout": expected_error,
-+                    "reason": expected_stderr + "\n" + expected_stdout,
-                 }
-             )
-         )
-@@ -727,7 +853,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-     def test_standby_local(self):
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("dummy", expected_retval)
-+        mock_runner.run.return_value = ("dummy", "", expected_retval)
- 
-         output = lib.nodes_standby(mock_runner)
- 
-@@ -739,7 +865,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-     def test_unstandby_local(self):
-         expected_retval = 0
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("dummy", expected_retval)
-+        mock_runner.run.return_value = ("dummy", "", expected_retval)
- 
-         output = lib.nodes_unstandby(mock_runner)
- 
-@@ -760,8 +886,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
-             for n in nodes
-         ]
--        return_value_list = [(str(self.status), 0)]
--        return_value_list += [("dummy", 0) for n in nodes]
-+        return_value_list = [(str(self.status), "", 0)]
-+        return_value_list += [("dummy", "", 0) for n in nodes]
-         mock_runner.run.side_effect = return_value_list
- 
-         output = lib.nodes_standby(mock_runner, all_nodes=True)
-@@ -783,8 +909,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             mock.call([self.path("crm_standby"), "-D", "-N", n])
-             for n in nodes
-         ]
--        return_value_list = [(str(self.status), 0)]
--        return_value_list += [("dummy", 0) for n in nodes]
-+        return_value_list = [(str(self.status), "", 0)]
-+        return_value_list += [("dummy", "", 0) for n in nodes]
-         mock_runner.run.side_effect = return_value_list
- 
-         output = lib.nodes_unstandby(mock_runner, all_nodes=True)
-@@ -806,8 +932,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
-             for n in nodes[1:]
-         ]
--        return_value_list = [(str(self.status), 0)]
--        return_value_list += [("dummy", 0) for n in nodes[1:]]
-+        return_value_list = [(str(self.status), "", 0)]
-+        return_value_list += [("dummy", "", 0) for n in nodes[1:]]
-         mock_runner.run.side_effect = return_value_list
- 
-         output = lib.nodes_standby(mock_runner, node_list=nodes[1:])
-@@ -829,8 +955,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             mock.call([self.path("crm_standby"), "-D", "-N", n])
-             for n in nodes[:2]
-         ]
--        return_value_list = [(str(self.status), 0)]
--        return_value_list += [("dummy", 0) for n in nodes[:2]]
-+        return_value_list = [(str(self.status), "", 0)]
-+        return_value_list += [("dummy", "", 0) for n in nodes[:2]]
-         mock_runner.run.side_effect = return_value_list
- 
-         output = lib.nodes_unstandby(mock_runner, node_list=nodes[:2])
-@@ -845,7 +971,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             self.fixture_get_node_status("node_1", "id_1")
-         )
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (str(self.status), 0)
-+        mock_runner.run.return_value = (str(self.status), "", 0)
- 
-         assert_raise_library_error(
-             lambda: lib.nodes_standby(mock_runner, ["node_2"]),
-@@ -863,7 +989,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             self.fixture_get_node_status("node_1", "id_1")
-         )
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (str(self.status), 0)
-+        mock_runner.run.return_value = (str(self.status), "", 0)
- 
-         assert_raise_library_error(
-             lambda: lib.nodes_unstandby(mock_runner, ["node_2", "node_3"]),
-@@ -882,17 +1008,24 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
- 
-     def test_error_one_node(self):
--        expected_error = "some error"
-+        expected_stdout = "some info"
-+        expected_stderr = "some error"
-         expected_retval = 1
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (expected_error, expected_retval)
-+        mock_runner.run.return_value = (
-+            expected_stdout,
-+            expected_stderr,
-+            expected_retval
-+        )
- 
-         assert_raise_library_error(
-             lambda: lib.nodes_unstandby(mock_runner),
-             (
-                 Severity.ERROR,
-                 report_codes.COMMON_ERROR,
--                {}
-+                {
-+                    "text": expected_stderr + "\n" + expected_stdout,
-+                }
-             )
-         )
- 
-@@ -913,11 +1046,11 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             for n in nodes
-         ]
-         return_value_list = [
--            (str(self.status), 0),
--            ("dummy1", 0),
--            ("dummy2", 1),
--            ("dummy3", 0),
--            ("dummy4", 1),
-+            (str(self.status), "", 0),
-+            ("dummy1", "", 0),
-+            ("dummy2", "error2", 1),
-+            ("dummy3", "", 0),
-+            ("dummy4", "error4", 1),
-         ]
-         mock_runner.run.side_effect = return_value_list
- 
-@@ -926,12 +1059,16 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
-             (
-                 Severity.ERROR,
-                 report_codes.COMMON_ERROR,
--                {}
-+                {
-+                    "text": "error2\ndummy2",
-+                }
-             ),
-             (
-                 Severity.ERROR,
-                 report_codes.COMMON_ERROR,
--                {}
-+                {
-+                    "text": "error4\ndummy4",
-+                }
-             )
-         )
- 
-diff --git a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py
-index 08f9061..a569e66 100644
---- a/pcs/test/test_lib_resource_agent.py
-+++ b/pcs/test/test_lib_resource_agent.py
-@@ -199,7 +199,7 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
-     def test_execution_failed(self, mock_is_runnable):
-         mock_is_runnable.return_value = True
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("error", 1)
-+        mock_runner.run.return_value = ("", "error", 1)
-         agent_name = "fence_ipmi"
- 
-         self.assert_raises(
-@@ -210,13 +210,13 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
- 
-         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
-         mock_runner.run.assert_called_once_with(
--            [script_path, "-o", "metadata"], ignore_stderr=True
-+            [script_path, "-o", "metadata"]
-         )
- 
-     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-     def test_invalid_xml(self, mock_is_runnable):
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("not xml", 0)
-+        mock_runner.run.return_value = ("not xml", "", 0)
-         mock_is_runnable.return_value = True
-         agent_name = "fence_ipmi"
-         self.assert_raises(
-@@ -227,7 +227,7 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
- 
-         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
-         mock_runner.run.assert_called_once_with(
--            [script_path, "-o", "metadata"], ignore_stderr=True
-+            [script_path, "-o", "metadata"]
-         )
- 
-     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-@@ -235,14 +235,14 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
-         agent_name = "fence_ipmi"
-         xml = "<xml />"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (xml, 0)
-+        mock_runner.run.return_value = (xml, "", 0)
-         mock_is_runnable.return_value = True
- 
-         out_dom = lib_ra.get_fence_agent_metadata(mock_runner, agent_name)
- 
-         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
-         mock_runner.run.assert_called_once_with(
--            [script_path, "-o", "metadata"], ignore_stderr=True
-+            [script_path, "-o", "metadata"]
-         )
-         assert_xml_equal(xml, str(XmlMan(out_dom)))
- 
-@@ -304,7 +304,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
-         provider = "provider"
-         agent = "agent"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("error", 1)
-+        mock_runner.run.return_value = ("", "error", 1)
-         mock_is_runnable.return_value = True
- 
-         self.assert_raises(
-@@ -318,8 +318,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
-         script_path = os.path.join(settings.ocf_resources, provider, agent)
-         mock_runner.run.assert_called_once_with(
-             [script_path, "meta-data"],
--            env_extend={"OCF_ROOT": settings.ocf_root},
--            ignore_stderr=True
-+            env_extend={"OCF_ROOT": settings.ocf_root}
-         )
- 
-     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-@@ -327,7 +326,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
-         provider = "provider"
-         agent = "agent"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("not xml", 0)
-+        mock_runner.run.return_value = ("not xml", "", 0)
-         mock_is_runnable.return_value = True
- 
-         self.assert_raises(
-@@ -341,8 +340,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
-         script_path = os.path.join(settings.ocf_resources, provider, agent)
-         mock_runner.run.assert_called_once_with(
-             [script_path, "meta-data"],
--            env_extend={"OCF_ROOT": settings.ocf_root},
--            ignore_stderr=True
-+            env_extend={"OCF_ROOT": settings.ocf_root}
-         )
- 
-     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-@@ -351,7 +349,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
-         agent = "agent"
-         xml = "<xml />"
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (xml, 0)
-+        mock_runner.run.return_value = (xml, "", 0)
-         mock_is_runnable.return_value = True
- 
-         out_dom = lib_ra._get_ocf_resource_agent_metadata(
-@@ -361,8 +359,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
-         script_path = os.path.join(settings.ocf_resources, provider, agent)
-         mock_runner.run.assert_called_once_with(
-             [script_path, "meta-data"],
--            env_extend={"OCF_ROOT": settings.ocf_root},
--            ignore_stderr=True
-+            env_extend={"OCF_ROOT": settings.ocf_root}
-         )
-         assert_xml_equal(xml, str(XmlMan(out_dom)))
- 
-@@ -596,7 +593,7 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
-             </resource-agent>
-         """
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = (xml, 0)
-+        mock_runner.run.return_value = (xml, "", 0)
-         self.assertEqual(
-             [
-                 {
-@@ -623,12 +620,12 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
-             lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
-         )
-         mock_runner.run.assert_called_once_with(
--            [settings.stonithd_binary, "metadata"], ignore_stderr=True
-+            [settings.stonithd_binary, "metadata"]
-         )
- 
-     def test_failed_to_get_xml(self):
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("", 1)
-+        mock_runner.run.return_value = ("", "some error", 1)
-         self.assert_raises(
-             lib_ra.UnableToGetAgentMetadata,
-             lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner),
-@@ -636,19 +633,19 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
-         )
- 
-         mock_runner.run.assert_called_once_with(
--            [settings.stonithd_binary, "metadata"], ignore_stderr=True
-+            [settings.stonithd_binary, "metadata"]
-         )
- 
-     def test_invalid_xml(self):
-         mock_runner = mock.MagicMock(spec_set=CommandRunner)
--        mock_runner.run.return_value = ("invalid XML", 0)
-+        mock_runner.run.return_value = ("invalid XML", "", 0)
-         self.assertRaises(
-             lib_ra.InvalidMetadataFormat,
-             lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
-         )
- 
-         mock_runner.run.assert_called_once_with(
--            [settings.stonithd_binary, "metadata"], ignore_stderr=True
-+            [settings.stonithd_binary, "metadata"]
-         )
- 
- 
-diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
-index 720d8b1..9b7b801 100644
---- a/pcs/test/test_lib_sbd.py
-+++ b/pcs/test/test_lib_sbd.py
-@@ -155,9 +155,8 @@ class AtbHasToBeEnabledTest(TestCase):
-         self.assertFalse(lib_sbd.atb_has_to_be_enabled(
-             self.mock_runner, self.mock_conf, 1
-         ))
--        mock_is_needed.assert_called_once_with(
--            self.mock_runner, self.mock_conf, 1
--        )
-+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
-+        mock_is_needed.assert_not_called()
- 
-     def test_atb_needed_is_disabled(self, mock_is_needed):
-         mock_is_needed.return_value = True
-@@ -165,6 +164,7 @@ class AtbHasToBeEnabledTest(TestCase):
-         self.assertTrue(lib_sbd.atb_has_to_be_enabled(
-             self.mock_runner, self.mock_conf, -1
-         ))
-+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
-         mock_is_needed.assert_called_once_with(
-             self.mock_runner, self.mock_conf, -1
-         )
-@@ -175,9 +175,8 @@ class AtbHasToBeEnabledTest(TestCase):
-         self.assertFalse(lib_sbd.atb_has_to_be_enabled(
-             self.mock_runner, self.mock_conf, 2
-         ))
--        mock_is_needed.assert_called_once_with(
--            self.mock_runner, self.mock_conf, 2
--        )
-+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
-+        mock_is_needed.assert_not_called()
- 
-     def test_atb_not_needed_is_disabled(self, mock_is_needed):
-         mock_is_needed.return_value = False
-@@ -185,6 +184,7 @@ class AtbHasToBeEnabledTest(TestCase):
-         self.assertFalse(lib_sbd.atb_has_to_be_enabled(
-             self.mock_runner, self.mock_conf, -2
-         ))
-+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
-         mock_is_needed.assert_called_once_with(
-             self.mock_runner, self.mock_conf, -2
-         )
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch b/SOURCES/bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch
deleted file mode 100644
index f13113c..0000000
--- a/SOURCES/bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch
+++ /dev/null
@@ -1,201 +0,0 @@
-From 4fe757d176060089e46f76d66ef20918b65e1f7f Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Tue, 20 Sep 2016 08:20:29 +0200
-Subject: [PATCH] squash bz1158805 Add support for qdevice/qnetd pro
-
-66e72fa18ddb lib: do not error out in "qdevice stop" if qdevice is stopped already
-
-788407652f58 lib: fix removing qdevice from a cluster
----
- pcs/common/report_codes.py      |  1 +
- pcs/lib/commands/qdevice.py     | 22 +++++++++++++++-------
- pcs/lib/commands/quorum.py      |  7 +++----
- pcs/lib/corosync/qdevice_net.py | 32 +++++++++++++++++++++++++-------
- pcs/lib/reports.py              | 13 +++++++++++++
- 5 files changed, 57 insertions(+), 18 deletions(-)
-
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index 23e931f..9b05951 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -134,6 +134,7 @@ QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR"
- QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS"
- QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED"
- QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED"
-+QDEVICE_NOT_RUNNING = "QDEVICE_NOT_RUNNING"
- QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED"
- QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED"
- QDEVICE_USED_BY_CLUSTERS = "QDEVICE_USED_BY_CLUSTERS"
-diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
-index ca0ae86..119c51d 100644
---- a/pcs/lib/commands/qdevice.py
-+++ b/pcs/lib/commands/qdevice.py
-@@ -61,11 +61,16 @@ def qdevice_status_text(lib_env, model, verbose=False, cluster=None):
-     _ensure_not_cman(lib_env)
-     _check_model(model)
-     runner = lib_env.cmd_runner()
--    return (
--        qdevice_net.qdevice_status_generic_text(runner, verbose)
--        +
--        qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose)
--    )
-+    try:
-+        return (
-+            qdevice_net.qdevice_status_generic_text(runner, verbose)
-+            +
-+            qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose)
-+        )
-+    except qdevice_net.QnetdNotRunningException:
-+        raise LibraryError(
-+            reports.qdevice_not_running(model)
-+        )
- 
- def qdevice_enable(lib_env, model):
-     """
-@@ -196,8 +201,11 @@ def _check_qdevice_not_used(reporter, runner, model, force=False):
-     _check_model(model)
-     connected_clusters = []
-     if model == "net":
--        status = qdevice_net.qdevice_status_cluster_text(runner)
--        connected_clusters = qdevice_net.qdevice_connected_clusters(status)
-+        try:
-+            status = qdevice_net.qdevice_status_cluster_text(runner)
-+            connected_clusters = qdevice_net.qdevice_connected_clusters(status)
-+        except qdevice_net.QnetdNotRunningException:
-+            pass
-     if connected_clusters:
-         reporter.process(reports.qdevice_used_by_clusters(
-             connected_clusters,
-diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
-index 8390fc6..aa98e61 100644
---- a/pcs/lib/commands/quorum.py
-+++ b/pcs/lib/commands/quorum.py
-@@ -285,6 +285,7 @@ def remove_device(lib_env, skip_offline_nodes=False):
-     cfg.remove_quorum_device()
- 
-     if lib_env.is_corosync_conf_live:
-+        communicator = lib_env.node_communicator()
-         # fix quorum options for SBD to work properly
-         if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg):
-             lib_env.report_processor.process(reports.sbd_requires_atb())
-@@ -292,10 +293,6 @@ def remove_device(lib_env, skip_offline_nodes=False):
-                 lib_env.report_processor, {"auto_tie_breaker": "1"}
-             )
- 
--    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
--
--    if lib_env.is_corosync_conf_live:
--        communicator = lib_env.node_communicator()
-         # disable qdevice
-         lib_env.report_processor.process(
-             reports.service_disable_started("corosync-qdevice")
-@@ -330,6 +327,8 @@ def remove_device(lib_env, skip_offline_nodes=False):
-                 skip_offline_nodes
-             )
- 
-+    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
-+
- def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
-     """
-     remove configuration used by qdevice model net
-diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
-index 200e45a..fa44923 100644
---- a/pcs/lib/corosync/qdevice_net.py
-+++ b/pcs/lib/corosync/qdevice_net.py
-@@ -35,6 +35,9 @@ __qdevice_certutil = os.path.join(
-     "corosync-qdevice-net-certutil"
- )
- 
-+class QnetdNotRunningException(Exception):
-+    pass
-+
- def qdevice_setup(runner):
-     """
-     initialize qdevice on local host
-@@ -79,10 +82,10 @@ def qdevice_status_generic_text(runner, verbose=False):
-     get qdevice runtime status in plain text
-     bool verbose get more detailed output
-     """
--    cmd = [__qnetd_tool, "-s"]
-+    args = ["-s"]
-     if verbose:
--        cmd.append("-v")
--    stdout, stderr, retval = runner.run(cmd)
-+        args.append("-v")
-+    stdout, stderr, retval = _qdevice_run_tool(runner, args)
-     if retval != 0:
-         raise LibraryError(
-             reports.qdevice_get_status_error(
-@@ -98,12 +101,12 @@ def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
-     bool verbose get more detailed output
-     string cluster show information only about specified cluster
-     """
--    cmd = [__qnetd_tool, "-l"]
-+    args = ["-l"]
-     if verbose:
--        cmd.append("-v")
-+        args.append("-v")
-     if cluster:
--        cmd.extend(["-c", cluster])
--    stdout, stderr, retval = runner.run(cmd)
-+        args.extend(["-c", cluster])
-+    stdout, stderr, retval = _qdevice_run_tool(runner, args)
-     if retval != 0:
-         raise LibraryError(
-             reports.qdevice_get_status_error(
-@@ -114,6 +117,10 @@ def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
-     return stdout
- 
- def qdevice_connected_clusters(status_cluster_text):
-+    """
-+    parse qnetd cluster status listing and return connected clusters' names
-+    string status_cluster_text output of corosync-qnetd-tool -l
-+    """
-     connected_clusters = []
-     regexp = re.compile(r'^Cluster "(?P<cluster>[^"]+)":$')
-     for line in status_cluster_text.splitlines():
-@@ -122,6 +129,17 @@ def qdevice_connected_clusters(status_cluster_text):
-             connected_clusters.append(match.group("cluster"))
-     return connected_clusters
- 
-+def _qdevice_run_tool(runner, args):
-+    """
-+    run corosync-qnetd-tool, raise QnetdNotRunningException if qnetd not running
-+    CommandRunner runner
-+    iterable args corosync-qnetd-tool arguments
-+    """
-+    stdout, stderr, retval = runner.run([__qnetd_tool] + args)
-+    if retval == 3 and "is qnetd running?" in stderr.lower():
-+        raise QnetdNotRunningException()
-+    return stdout, stderr, retval
-+
- def qdevice_enable(runner):
-     """
-     make qdevice start automatically on boot on local host
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index b9e9a66..cff491c 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -842,6 +842,19 @@ def qdevice_destroy_error(model, reason):
-         }
-     )
- 
-+def qdevice_not_running(model):
-+    """
-+    qdevice is expected to be running but is not running
-+    string model qdevice model
-+    """
-+    return ReportItem.error(
-+        report_codes.QDEVICE_NOT_RUNNING,
-+        "Quorum device '{model}' is not running",
-+        info={
-+            "model": model,
-+        }
-+    )
-+
- def qdevice_get_status_error(model, reason):
-     """
-     unable to get runtime status of qdevice
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1164402-01-sbd-fix-call_node-calls-on-python3.patch b/SOURCES/bz1164402-01-sbd-fix-call_node-calls-on-python3.patch
deleted file mode 100644
index 454e32a..0000000
--- a/SOURCES/bz1164402-01-sbd-fix-call_node-calls-on-python3.patch
+++ /dev/null
@@ -1,129 +0,0 @@
-From dff92f778f692f0ec2aa7d0c20e76a06a767e4b2 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Fri, 24 Jun 2016 20:17:38 +0200
-Subject: [PATCH] bz1164402-01 sbd fix call_node calls on python3
-
----
- pcs/lib/sbd.py           | 14 +++++++-------
- pcs/test/test_lib_sbd.py | 10 +++++-----
- 2 files changed, 12 insertions(+), 12 deletions(-)
-
-diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
-index 1330bfc..4488a73 100644
---- a/pcs/lib/sbd.py
-+++ b/pcs/lib/sbd.py
-@@ -57,7 +57,7 @@ def check_sbd(communicator, node, watchdog):
-     return communicator.call_node(
-         node,
-         "remote/check_sbd",
--        NodeCommunicator.format_data_dict({"watchdog": watchdog})
-+        NodeCommunicator.format_data_dict([("watchdog", watchdog)])
-     )
- 
- 
-@@ -119,7 +119,7 @@ def set_sbd_config(communicator, node, config):
-     communicator.call_node(
-         node,
-         "remote/set_sbd_config",
--        NodeCommunicator.format_data_dict({"config": config})
-+        NodeCommunicator.format_data_dict([("config", config)])
-     )
- 
- 
-@@ -171,7 +171,7 @@ def enable_sbd_service(communicator, node):
-     communicator -- NodeCommunicator
-     node -- NodeAddresses
-     """
--    communicator.call_node(node, "remote/sbd_enable", "")
-+    communicator.call_node(node, "remote/sbd_enable", None)
- 
- 
- def enable_sbd_service_on_node(report_processor, node_communicator, node):
-@@ -215,7 +215,7 @@ def disable_sbd_service(communicator, node):
-     communicator -- NodeCommunicator
-     node -- NodeAddresses
-     """
--    communicator.call_node(node, "remote/sbd_disable", "")
-+    communicator.call_node(node, "remote/sbd_disable", None)
- 
- 
- def disable_sbd_service_on_node(report_processor, node_communicator, node):
-@@ -259,7 +259,7 @@ def set_stonith_watchdog_timeout_to_zero(communicator, node):
-     node -- NodeAddresses
-     """
-     communicator.call_node(
--        node, "remote/set_stonith_watchdog_timeout_to_zero", ""
-+        node, "remote/set_stonith_watchdog_timeout_to_zero", None
-     )
- 
- 
-@@ -292,7 +292,7 @@ def remove_stonith_watchdog_timeout(communicator, node):
-     communicator -- NodeCommunicator
-     node -- NodeAddresses
-     """
--    communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", "")
-+    communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", None)
- 
- 
- def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list):
-@@ -351,7 +351,7 @@ def get_sbd_config(communicator, node):
-     communicator -- NodeCommunicator
-     node -- NodeAddresses
-     """
--    return communicator.call_node(node, "remote/get_sbd_config", "")
-+    return communicator.call_node(node, "remote/get_sbd_config", None)
- 
- 
- def is_sbd_enabled(runner):
-diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
-index 54c5669..e3c1401 100644
---- a/pcs/test/test_lib_sbd.py
-+++ b/pcs/test/test_lib_sbd.py
-@@ -360,7 +360,7 @@ class EnableSbdServiceTest(TestCase):
-         node = NodeAddresses("node1")
-         lib_sbd.enable_sbd_service(mock_communicator, node)
-         mock_communicator.call_node.assert_called_once_with(
--            node, "remote/sbd_enable", ""
-+            node, "remote/sbd_enable", None
-         )
- 
- 
-@@ -408,7 +408,7 @@ class DisableSbdServiceTest(TestCase):
-         node = NodeAddresses("node1")
-         lib_sbd.disable_sbd_service(mock_communicator, node)
-         mock_communicator.call_node.assert_called_once_with(
--            node, "remote/sbd_disable", ""
-+            node, "remote/sbd_disable", None
-         )
- 
- 
-@@ -456,7 +456,7 @@ class SetStonithWatchdogTimeoutToZeroTest(TestCase):
-         node = NodeAddresses("node1")
-         lib_sbd.set_stonith_watchdog_timeout_to_zero(mock_communicator, node)
-         mock_communicator.call_node.assert_called_once_with(
--            node, "remote/set_stonith_watchdog_timeout_to_zero", ""
-+            node, "remote/set_stonith_watchdog_timeout_to_zero", None
-         )
- 
- 
-@@ -520,7 +520,7 @@ class RemoveStonithWatchdogTimeoutTest(TestCase):
-         node = NodeAddresses("node1")
-         lib_sbd.remove_stonith_watchdog_timeout(mock_communicator, node)
-         mock_communicator.call_node.assert_called_once_with(
--            node, "remote/remove_stonith_watchdog_timeout", ""
-+            node, "remote/remove_stonith_watchdog_timeout", None
-         )
- 
- 
-@@ -584,7 +584,7 @@ class GetSbdConfigTest(TestCase):
-         node = NodeAddresses("node1")
-         lib_sbd.get_sbd_config(mock_communicator, node)
-         mock_communicator.call_node.assert_called_once_with(
--            node, "remote/get_sbd_config", ""
-+            node, "remote/get_sbd_config", None
-         )
- 
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1164402-02-sbd-fixes.patch b/SOURCES/bz1164402-02-sbd-fixes.patch
deleted file mode 100644
index aac71ce..0000000
--- a/SOURCES/bz1164402-02-sbd-fixes.patch
+++ /dev/null
@@ -1,1621 +0,0 @@
-From e43d7324b9fc6933d8fa431e66c6236721724b98 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Fri, 19 Aug 2016 02:57:38 +0200
-Subject: [PATCH] squash bz1164402 Support for sbd configuration is
-
-f2da8ad476c3 fix disable_service
-
-9367e7162b7b fix code formating
-
-57b618777d14 test: fix tests with parallel operations in SBD
-
-f7b9fc15072c sbd: change error message
-
-0dbdff4628d5 sbd: fix setting watchdog in config
-
-1c5ccd3be588 sbd: ban changing sbd option SBD_PACEMAKER
-
-733e28337589 sbd: add validation for SBD_WATCHDOG_TIMEOUT option
-
-9951f3262ef1 docs: add watchdog option to node add command
-
-d79592e05158 lib: fix disabling service on systemd systems
-
-17e4c5838842 sbd: set auto_tie_breaker whenever it is needed for SBD to work
-
-1ed4c2e3bc38 lib: fix enabled ATB in corosync.conf detection
----
- pcs/cluster.py                              |  54 +++++-
- pcs/common/report_codes.py                  |   4 +-
- pcs/lib/commands/quorum.py                  |  41 ++++-
- pcs/lib/commands/sbd.py                     |  30 ++-
- pcs/lib/corosync/config_facade.py           |  15 +-
- pcs/lib/external.py                         |   4 +-
- pcs/lib/reports.py                          |  27 +++
- pcs/lib/sbd.py                              |  78 +++++++-
- pcs/pcs.8                                   |   4 +-
- pcs/quorum.py                               |   3 +-
- pcs/stonith.py                              |   4 +-
- pcs/test/test_lib_commands_quorum.py        | 205 ++++++++++++++++++++-
- pcs/test/test_lib_commands_sbd.py           | 134 +++++++++++++-
- pcs/test/test_lib_corosync_config_facade.py |  28 +++
- pcs/test/test_lib_external.py               |  22 ++-
- pcs/test/test_lib_sbd.py                    | 272 +++++++++++++++++++++++++++-
- pcs/usage.py                                |   3 +
- pcs/utils.py                                |  25 ++-
- pcsd/pcs.rb                                 |  10 +-
- 19 files changed, 908 insertions(+), 55 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 90fec63..577e08e 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -43,6 +43,7 @@ from pcs.lib import (
-     reports as lib_reports,
- )
- from pcs.lib.booth import sync as booth_sync
-+from pcs.lib.nodes_task import check_corosync_offline_on_nodes
- from pcs.lib.commands.quorum import _add_device_model_net
- from pcs.lib.corosync import (
-     config_parser as corosync_conf_utils,
-@@ -1328,6 +1329,36 @@ def get_cib(argv):
-         except IOError as e:
-             utils.err("Unable to write to file '%s', %s" % (filename, e.strerror))
- 
-+
-+def _ensure_cluster_is_offline_if_atb_should_be_enabled(
-+    lib_env, node_num_modifier, skip_offline_nodes=False
-+):
-+    """
-+    Check if cluster is offline if auto tie breaker should be enabled.
-+    Raises LibraryError if ATB needs to be enabled cluster is not offline.
-+
-+    lib_env -- LibraryEnvironment
-+    node_num_modifier -- number which wil be added to the number of nodes in
-+        cluster when determining whenever ATB is needed.
-+    skip_offline_nodes -- if True offline nodes will be skipped
-+    """
-+    corosync_conf = lib_env.get_corosync_conf()
-+    if lib_sbd.atb_has_to_be_enabled(
-+        lib_env.cmd_runner(), corosync_conf, node_num_modifier
-+    ):
-+        print(
-+            "Warning: auto_tie_breaker quorum option will be enabled to make "
-+            "SBD fencing effecive after this change. Cluster has to be offline "
-+            "to be able to make this change."
-+        )
-+        check_corosync_offline_on_nodes(
-+            lib_env.node_communicator(),
-+            lib_env.report_processor,
-+            corosync_conf.get_nodes(),
-+            skip_offline_nodes
-+        )
-+
-+
- def cluster_node(argv):
-     if len(argv) != 2:
-         usage.cluster()
-@@ -1363,6 +1394,9 @@ def cluster_node(argv):
-                 msg += ", use --force to override"
-             utils.err(msg)
- 
-+    lib_env = utils.get_lib_env()
-+    modifiers = utils.get_modificators()
-+
-     if add_node == True:
-         wait = False
-         wait_timeout = None
-@@ -1385,11 +1419,9 @@ def cluster_node(argv):
-         if not canAdd:
-             utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
- 
--        lib_env = utils.get_lib_env()
-         report_processor = lib_env.report_processor
-         node_communicator = lib_env.node_communicator()
-         node_addr = NodeAddresses(node0, node1)
--        modifiers = utils.get_modificators()
-         try:
-             if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
-                 if "--watchdog" not in utils.pcs_options:
-@@ -1400,6 +1432,10 @@ def cluster_node(argv):
-                 else:
-                     watchdog = utils.pcs_options["--watchdog"][0]
- 
-+                _ensure_cluster_is_offline_if_atb_should_be_enabled(
-+                    lib_env, 1, modifiers["skip_offline_nodes"]
-+                )
-+
-                 report_processor.process(lib_reports.sbd_check_started())
-                 lib_sbd.check_sbd_on_node(
-                     report_processor, node_communicator, node_addr, watchdog
-@@ -1407,12 +1443,15 @@ def cluster_node(argv):
-                 sbd_cfg = environment_file_to_dict(
-                     lib_sbd.get_local_sbd_config()
-                 )
--                sbd_cfg["SBD_WATCHDOG_DEV"] = watchdog
-                 report_processor.process(
-                     lib_reports.sbd_config_distribution_started()
-                 )
-                 lib_sbd.set_sbd_config_on_node(
--                    report_processor, node_communicator, node_addr, sbd_cfg
-+                    report_processor,
-+                    node_communicator,
-+                    node_addr,
-+                    sbd_cfg,
-+                    watchdog
-                 )
-                 report_processor.process(lib_reports.sbd_enabling_started())
-                 lib_sbd.enable_sbd_service_on_node(
-@@ -1549,6 +1588,13 @@ def cluster_node(argv):
-                 )
-             # else the node seems to be stopped already, we're ok to proceed
- 
-+        try:
-+            _ensure_cluster_is_offline_if_atb_should_be_enabled(
-+                lib_env, -1, modifiers["skip_offline_nodes"]
-+            )
-+        except LibraryError as e:
-+            utils.process_library_reports(e.args)
-+
-         nodesRemoved = False
-         c_nodes = utils.getNodesFromCorosyncConf()
-         destroy_cluster([node0], keep_going=("--force" in utils.pcs_options))
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index e71d418..672c2e3 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -10,9 +10,9 @@ FORCE_ACTIVE_RRP = "ACTIVE_RRP"
- FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE"
- FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB"
- FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY"
--FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE"
- FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE"
- FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
-+FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE"
- FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
- FORCE_OPTIONS = "OPTIONS"
- FORCE_QDEVICE_MODEL = "QDEVICE_MODEL"
-@@ -81,6 +81,7 @@ COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED"
- COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR"
- COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE"
- COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE"
-+COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD = "COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD"
- COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
- COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR"
- COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
-@@ -179,5 +180,6 @@ UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG"
- UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS"
- UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
- UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT'
-+WATCHDOG_INVALID = "WATCHDOG_INVALID"
- UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS"
- WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND"
-diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
-index 7425e78..7fb7bb4 100644
---- a/pcs/lib/commands/quorum.py
-+++ b/pcs/lib/commands/quorum.py
-@@ -5,8 +5,9 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from pcs.lib import reports
--from pcs.lib.errors import LibraryError
-+from pcs.common import report_codes
-+from pcs.lib import reports, sbd
-+from pcs.lib.errors import LibraryError, ReportItemSeverity
- from pcs.lib.corosync import (
-     live as corosync_live,
-     qdevice_net,
-@@ -39,16 +40,50 @@ def get_config(lib_env):
-         "device": device,
-     }
- 
--def set_options(lib_env, options, skip_offline_nodes=False):
-+
-+def _check_if_atb_can_be_disabled(
-+    runner, report_processor, corosync_conf, was_enabled, force=False
-+):
-+    """
-+    Check whenever auto_tie_breaker can be changed without affecting SBD.
-+    Raises LibraryError if change of ATB will affect SBD functionality.
-+
-+    runner -- CommandRunner
-+    report_processor -- report processor
-+    corosync_conf -- corosync conf facade
-+    was_enabled -- True if ATB was enabled, False otherwise
-+    force -- force change
-+    """
-+    if (
-+        was_enabled
-+        and
-+        not corosync_conf.is_enabled_auto_tie_breaker()
-+        and
-+        sbd.is_auto_tie_breaker_needed(runner, corosync_conf)
-+    ):
-+        report_processor.process(reports.quorum_cannot_disable_atb_due_to_sbd(
-+            ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR,
-+            None if force else report_codes.FORCE_OPTIONS
-+        ))
-+
-+
-+def set_options(lib_env, options, skip_offline_nodes=False, force=False):
-     """
-     Set corosync quorum options, distribute and reload corosync.conf if live
-     lib_env LibraryEnvironment
-     options quorum options (dict)
-     skip_offline_nodes continue even if not all nodes are accessible
-+    bool force force changes
-     """
-     __ensure_not_cman(lib_env)
-     cfg = lib_env.get_corosync_conf()
-+    atb_enabled = cfg.is_enabled_auto_tie_breaker()
-     cfg.set_quorum_options(lib_env.report_processor, options)
-+    if lib_env.is_corosync_conf_live:
-+        _check_if_atb_can_be_disabled(
-+            lib_env.cmd_runner(), lib_env.report_processor,
-+            cfg, atb_enabled, force
-+        )
-     lib_env.push_corosync_conf(cfg, skip_offline_nodes)
- 
- def status_text(lib_env):
-diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
-index 875758f..265ebb5 100644
---- a/pcs/lib/commands/sbd.py
-+++ b/pcs/lib/commands/sbd.py
-@@ -5,6 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
-+import os
- import json
- 
- from pcs import settings
-@@ -44,7 +45,9 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
-     """
- 
-     report_item_list = []
--    unsupported_sbd_option_list = ["SBD_WATCHDOG_DEV", "SBD_OPTS"]
-+    unsupported_sbd_option_list = [
-+        "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"
-+    ]
-     allowed_sbd_options = [
-         "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
-     ]
-@@ -62,6 +65,17 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
-                 Severities.WARNING if allow_unknown_opts else Severities.ERROR,
-                 None if allow_unknown_opts else report_codes.FORCE_OPTIONS
-             ))
-+    if "SBD_WATCHDOG_TIMEOUT" in sbd_config:
-+        report_item = reports.invalid_option_value(
-+            "SBD_WATCHDOG_TIMEOUT",
-+            sbd_config["SBD_WATCHDOG_TIMEOUT"],
-+            "nonnegative integer"
-+        )
-+        try:
-+            if int(sbd_config["SBD_WATCHDOG_TIMEOUT"]) < 0:
-+                report_item_list.append(report_item)
-+        except (ValueError, TypeError):
-+            report_item_list.append(report_item)
- 
-     return report_item_list
- 
-@@ -81,6 +95,9 @@ def _get_full_watchdog_list(node_list, default_watchdog, watchdog_dict):
-     report_item_list = []
- 
-     for node_name, watchdog in watchdog_dict.items():
-+        if not watchdog or not os.path.isabs(watchdog):
-+            report_item_list.append(reports.invalid_watchdog_path(watchdog))
-+            continue
-         try:
-             full_dict[node_list.find_by_label(node_name)] = watchdog
-         except NodeNotFound:
-@@ -140,6 +157,14 @@ def enable_sbd(
-         full_watchdog_dict
-     )
- 
-+    # enable ATB if needed
-+    corosync_conf = lib_env.get_corosync_conf()
-+    if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), corosync_conf):
-+        corosync_conf.set_quorum_options(
-+            lib_env.report_processor, {"auto_tie_breaker": "1"}
-+        )
-+        lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes)
-+
-     # distribute SBD configuration
-     config = sbd.get_default_sbd_config()
-     config.update(sbd_options)
-@@ -147,7 +172,8 @@ def enable_sbd(
-         lib_env.report_processor,
-         lib_env.node_communicator(),
-         online_nodes,
--        config
-+        config,
-+        full_watchdog_dict
-     )
- 
-     # remove cluster prop 'stonith_watchdog_timeout'
-diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
-index 600a89b..be621c0 100644
---- a/pcs/lib/corosync/config_facade.py
-+++ b/pcs/lib/corosync/config_facade.py
-@@ -129,6 +129,16 @@ class ConfigFacade(object):
-                     options[name] = value
-         return options
- 
-+    def is_enabled_auto_tie_breaker(self):
-+        """
-+        Returns True if auto tie braker option is enabled, False otherwise.
-+        """
-+        auto_tie_breaker = "0"
-+        for quorum in self.config.get_sections("quorum"):
-+            for attr in quorum.get_attributes("auto_tie_breaker"):
-+                auto_tie_breaker = attr[1]
-+        return auto_tie_breaker == "1"
-+
-     def __validate_quorum_options(self, options):
-         report_items = []
-         has_qdevice = self.has_quorum_device()
-@@ -488,10 +498,7 @@ class ConfigFacade(object):
-         # get relevant status
-         has_quorum_device = self.has_quorum_device()
-         has_two_nodes = len(self.get_nodes()) == 2
--        auto_tie_breaker = False
--        for quorum in self.config.get_sections("quorum"):
--            for attr in quorum.get_attributes("auto_tie_breaker"):
--                auto_tie_breaker = attr[1] != "0"
-+        auto_tie_breaker = self.is_enabled_auto_tie_breaker()
-         # update two_node
-         if has_two_nodes and not auto_tie_breaker and not has_quorum_device:
-             quorum_section_list = self.__ensure_section(self.config, "quorum")
-diff --git a/pcs/lib/external.py b/pcs/lib/external.py
-index 25e071f..08bf2bb 100644
---- a/pcs/lib/external.py
-+++ b/pcs/lib/external.py
-@@ -135,13 +135,13 @@ def disable_service(runner, service, instance=None):
-     instance -- instance name, it ha no effect on not systemd systems.
-         If None no instance name will be used.
-     """
-+    if not is_service_installed(runner, service):
-+        return
-     if is_systemctl():
-         output, retval = runner.run([
-             "systemctl", "disable", _get_service_name(service, instance)
-         ])
-     else:
--        if not is_service_installed(runner, service):
--            return
-         output, retval = runner.run(["chkconfig", service, "off"])
-     if retval != 0:
-         raise DisableServiceError(service, output.rstrip(), instance)
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index eac95c7..568bb7e 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -1701,6 +1701,19 @@ def watchdog_not_found(node, watchdog):
-     )
- 
- 
-+def invalid_watchdog_path(watchdog):
-+    """
-+    watchdog path is not absolut path
-+
-+    watchdog -- watchdog device path
-+    """
-+    return ReportItem.error(
-+        report_codes.WATCHDOG_INVALID,
-+        "Watchdog path '{watchdog}' is invalid.",
-+        info={"watchdog": watchdog}
-+    )
-+
-+
- def unable_to_get_sbd_status(node, reason):
-     """
-     there was (communication or parsing) failure during obtaining status of SBD
-@@ -1901,3 +1914,17 @@ def live_environment_required(forbidden_options):
-             "options_string": ", ".join(forbidden_options),
-         }
-     )
-+
-+
-+def quorum_cannot_disable_atb_due_to_sbd(
-+    severity=ReportItemSeverity.ERROR, forceable=None
-+):
-+    """
-+    Quorum option auto_tie_breaker cannot be disbled due to SBD.
-+    """
-+    return ReportItem(
-+        report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD,
-+        severity,
-+        "unable to disable auto_tie_breaker: SBD fencing will have no effect",
-+        forceable=forceable
-+    )
-diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
-index 4488a73..c9f013b 100644
---- a/pcs/lib/sbd.py
-+++ b/pcs/lib/sbd.py
-@@ -46,6 +46,50 @@ def _run_parallel_and_raise_lib_error_on_failure(func, param_list):
-         raise LibraryError(*report_list)
- 
- 
-+def is_auto_tie_breaker_needed(
-+    runner, corosync_conf_facade, node_number_modifier=0
-+):
-+    """
-+    Returns True whenever quorum option auto tie breaker is needed to be enabled
-+    for proper working of SBD fencing. False if it is not needed.
-+
-+    runner -- command runner
-+    corosync_conf_facade --
-+    node_number_modifier -- this value vill be added to current number of nodes.
-+        This can be useful to test whenever is ATB needed when adding/removeing
-+        node.
-+    """
-+    return (
-+        not corosync_conf_facade.has_quorum_device()
-+        and
-+        (len(corosync_conf_facade.get_nodes()) + node_number_modifier) % 2 == 0
-+        and
-+        is_sbd_installed(runner)
-+        and
-+        is_sbd_enabled(runner)
-+    )
-+
-+def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0):
-+    """
-+    Return True whenever quorum option auto tie breaker has to be enabled for
-+    proper working of SBD fencing. False if it's not needed or it is already
-+    enabled.
-+
-+    runner -- command runner
-+    corosync_conf_facade --
-+    node_number_modifier -- this value vill be added to current number of nodes.
-+        This can be useful to test whenever is ATB needed when adding/removeing
-+        node.
-+    """
-+    return (
-+        is_auto_tie_breaker_needed(
-+            runner, corosync_conf_facade, node_number_modifier
-+        )
-+        and
-+        not corosync_conf_facade.is_enabled_auto_tie_breaker()
-+    )
-+
-+
- def check_sbd(communicator, node, watchdog):
-     """
-     Check SBD on specified 'node' and existence of specified watchdog.
-@@ -123,18 +167,23 @@ def set_sbd_config(communicator, node, config):
-     )
- 
- 
--def set_sbd_config_on_node(report_processor, node_communicator, node, config):
-+def set_sbd_config_on_node(
-+    report_processor, node_communicator, node, config, watchdog
-+):
-     """
--    Send SBD configuration to 'node'. Also puts correct node name into
--        SBD_OPTS option (SBD_OPTS="-n <node_name>").
-+    Send SBD configuration to 'node' with specified watchdog set. Also puts
-+    correct node name into SBD_OPTS option (SBD_OPTS="-n <node_name>").
- 
-     report_processor --
-     node_communicator -- NodeCommunicator
-     node -- NodeAddresses
-     config -- dictionary in format: <SBD config option>: <value>
-+    watchdog -- path to watchdog device
-     """
-     config = dict(config)
-     config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node.label)
-+    if watchdog:
-+        config["SBD_WATCHDOG_DEV"] = watchdog
-     set_sbd_config(node_communicator, node, dict_to_environment_file(config))
-     report_processor.process(
-         reports.sbd_config_accepted_by_node(node.label)
-@@ -142,7 +191,7 @@ def set_sbd_config_on_node(report_processor, node_communicator, node, config):
- 
- 
- def set_sbd_config_on_all_nodes(
--        report_processor, node_communicator, node_list, config
-+    report_processor, node_communicator, node_list, config, watchdog_dict
- ):
-     """
-     Send SBD configuration 'config' to all nodes in 'node_list'. Option
-@@ -153,12 +202,20 @@ def set_sbd_config_on_all_nodes(
-     node_communicator -- NodeCommunicator
-     node_list -- NodeAddressesList
-     config -- dictionary in format: <SBD config option>: <value>
-+    watchdog_dict -- dictionary of watchdogs where key is NodeAdresses object
-+        and value is path to watchdog
-     """
-     report_processor.process(reports.sbd_config_distribution_started())
-     _run_parallel_and_raise_lib_error_on_failure(
-         set_sbd_config_on_node,
-         [
--            ([report_processor, node_communicator, node, config], {})
-+            (
-+                [
-+                    report_processor, node_communicator, node, config,
-+                    watchdog_dict.get(node)
-+                ],
-+                {}
-+            )
-             for node in node_list
-         ]
-     )
-@@ -362,3 +419,14 @@ def is_sbd_enabled(runner):
-     runner -- CommandRunner
-     """
-     return external.is_service_enabled(runner, "sbd")
-+
-+
-+def is_sbd_installed(runner):
-+    """
-+    Check if SBD service is installed in local system.
-+    Reurns True id SBD service is installed. False otherwise.
-+
-+    runner -- CommandRunner
-+    """
-+    return external.is_service_installed(runner, "sbd")
-+
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 9064054..7a054ca 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -271,8 +271,8 @@ Upgrade the CIB to conform to the latest version of the document schema.
- edit [scope=<scope> | \fB\-\-config\fR]
- Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving.  Specify scope to edit a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to edit the whole CIB or be warned in the case of outdated CIB.
- .TP
--node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR]
--Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node.  If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is sepcified wait up to 'n' seconds for the new node to start.  If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node.  When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address.
-+node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR] [\fB\-\-watchdog\fR=<watchdog\-path>]
-+Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node.  If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is sepcified wait up to 'n' seconds for the new node to start.  If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node.  When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address. Use \fB\-\-watchdog\fR to specify path to watchdog on newly added node, when SBD is enabled in cluster.
- .TP
- node remove <node>
- Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster.
-diff --git a/pcs/quorum.py b/pcs/quorum.py
-index 1c2d41d..6cd06ca 100644
---- a/pcs/quorum.py
-+++ b/pcs/quorum.py
-@@ -121,7 +121,8 @@ def quorum_update_cmd(lib, argv, modificators):
- 
-     lib.quorum.set_options(
-         options,
--        skip_offline_nodes=modificators["skip_offline_nodes"]
-+        skip_offline_nodes=modificators["skip_offline_nodes"],
-+        force=modificators["force"]
-     )
- 
- def quorum_device_add_cmd(lib, argv, modificators):
-diff --git a/pcs/stonith.py b/pcs/stonith.py
-index 93332ef..23f3800 100644
---- a/pcs/stonith.py
-+++ b/pcs/stonith.py
-@@ -495,7 +495,7 @@ def _sbd_parse_watchdogs(watchdog_list):
-     for watchdog_node in watchdog_list:
-         if "@" not in watchdog_node:
-             if default_watchdog:
--                raise CmdLineInputError("Multiple default watchdogs.")
-+                raise CmdLineInputError("Multiple watchdog definitions.")
-             default_watchdog = watchdog_node
-         else:
-             watchdog, node_name = watchdog_node.rsplit("@", 1)
-@@ -553,7 +553,7 @@ def sbd_config(lib, argv, modifiers):
- 
-     config = config_list[0]["config"]
- 
--    filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS"]
-+    filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"]
-     for key, val in config.items():
-         if key in filtered_options:
-             continue
-diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
-index 826251a..d286a8f 100644
---- a/pcs/test/test_lib_commands_quorum.py
-+++ b/pcs/test/test_lib_commands_quorum.py
-@@ -25,6 +25,7 @@ from pcs.lib.errors import (
-     LibraryError,
-     ReportItemSeverity as severity,
- )
-+from pcs.lib.corosync.config_facade import ConfigFacade
- from pcs.lib.external import NodeCommunicationException
- from pcs.lib.node import NodeAddresses, NodeAddressesList
- 
-@@ -146,23 +147,201 @@ class GetQuorumConfigTest(TestCase, CmanMixin):
-         self.assertEqual([], self.mock_reporter.report_item_list)
- 
- 
-+@mock.patch("pcs.lib.sbd.is_auto_tie_breaker_needed")
-+class CheckIfAtbCanBeDisabledTest(TestCase):
-+    def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.mock_runner = "cmd_runner"
-+        self.mock_corosync_conf = mock.MagicMock(spec_set=ConfigFacade)
-+
-+    def test_atb_no_need_was_disabled_atb_disabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_no_need_was_disabled_atb_enabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_no_need_was_enable_atb_disabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_no_need_was_enabled_atb_enabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_needed_was_disabled_atb_disabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_needed_was_disabled_atb_enabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_needed_was_enable_atb_disabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        report_item = (
-+            severity.ERROR,
-+            report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD,
-+            {},
-+            report_codes.FORCE_OPTIONS
-+        )
-+        assert_raise_library_error(
-+            lambda: lib._check_if_atb_can_be_disabled(
-+                self.mock_runner,
-+                self.mock_reporter,
-+                self.mock_corosync_conf,
-+                True
-+            ),
-+            report_item
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list, [report_item]
-+        )
-+
-+    def test_atb_needed_was_enabled_atb_enabled(self, mock_atb_needed):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+
-+    def test_atb_no_need_was_disabled_atb_disabled_force(
-+        self, mock_atb_needed
-+    ):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
-+            False, force=True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_no_need_was_disabled_atb_enabled_force(
-+        self, mock_atb_needed
-+    ):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
-+            False, force=True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_no_need_was_enable_atb_disabled_force(self, mock_atb_needed):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
-+            force=True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_no_need_was_enabled_atb_enabled_force(self, mock_atb_needed):
-+        mock_atb_needed.return_value = False
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
-+            force=True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_needed_was_disabled_atb_disabled_force(
-+        self, mock_atb_needed
-+    ):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
-+            False, force=True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_needed_was_disabled_atb_enabled_force(self, mock_atb_needed):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
-+            False, force=True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_atb_needed_was_enable_atb_disabled_force(self, mock_atb_needed):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
-+            force=True
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                severity.WARNING,
-+                report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD,
-+                {},
-+                None
-+            )]
-+        )
-+
-+    def test_atb_needed_was_enabled_atb_enabled_force(self, mock_atb_needed):
-+        mock_atb_needed.return_value = True
-+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
-+        lib._check_if_atb_can_be_disabled(
-+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
-+            force=True
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+
-+@mock.patch("pcs.lib.commands.quorum._check_if_atb_can_be_disabled")
- @mock.patch.object(LibraryEnvironment, "push_corosync_conf")
- @mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
-+@mock.patch.object(LibraryEnvironment, "cmd_runner")
- class SetQuorumOptionsTest(TestCase, CmanMixin):
-     def setUp(self):
-         self.mock_logger = mock.MagicMock(logging.Logger)
-         self.mock_reporter = MockLibraryReportProcessor()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
--    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
-+    def test_disabled_on_cman(
-+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
-+    ):
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-         self.assert_disabled_on_cman(lambda: lib.set_options(lib_env, {}))
-         mock_get_corosync.assert_not_called()
-         mock_push_corosync.assert_not_called()
-+        mock_check.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-     def test_enabled_on_cman_if_not_live(
--        self, mock_get_corosync, mock_push_corosync
-+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
-     ):
-         original_conf = "invalid {\nconfig: stop after cman test"
-         mock_get_corosync.return_value = original_conf
-@@ -182,11 +361,16 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
-         )
- 
-         mock_push_corosync.assert_not_called()
-+        mock_check.assert_not_called()
-+        mock_runner.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_success(self, mock_get_corosync, mock_push_corosync):
-+    def test_success(
-+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
-+    ):
-         original_conf = open(rc("corosync-3nodes.conf")).read()
-         mock_get_corosync.return_value = original_conf
-+        mock_runner.return_value = "cmd_runner"
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
- 
-         new_options = {"wait_for_all": "1"}
-@@ -201,9 +385,16 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
-             )
-         )
-         self.assertEqual([], self.mock_reporter.report_item_list)
-+        self.assertEqual(1, mock_check.call_count)
-+        self.assertEqual("cmd_runner", mock_check.call_args[0][0])
-+        self.assertEqual(self.mock_reporter, mock_check.call_args[0][1])
-+        self.assertFalse(mock_check.call_args[0][3])
-+        self.assertFalse(mock_check.call_args[0][4])
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_bad_options(self, mock_get_corosync, mock_push_corosync):
-+    def test_bad_options(
-+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
-+    ):
-         original_conf = open(rc("corosync.conf")).read()
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -228,9 +419,12 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
-         )
- 
-         mock_push_corosync.assert_not_called()
-+        mock_check.assert_not_called()
- 
-     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
--    def test_bad_config(self, mock_get_corosync, mock_push_corosync):
-+    def test_bad_config(
-+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
-+    ):
-         original_conf = "invalid {\nconfig: this is"
-         mock_get_corosync.return_value = original_conf
-         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-@@ -246,6 +440,7 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
-         )
- 
-         mock_push_corosync.assert_not_called()
-+        mock_check.assert_not_called()
- 
- 
- @mock.patch("pcs.lib.commands.quorum.corosync_live.get_quorum_status_text")
-diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
-index 9a96757..0663082 100644
---- a/pcs/test/test_lib_commands_sbd.py
-+++ b/pcs/test/test_lib_commands_sbd.py
-@@ -35,6 +35,15 @@ from pcs.lib.external import (
- import pcs.lib.commands.sbd as cmd_sbd
- 
- 
-+def _assert_equal_list_of_dictionaries_without_order(expected, actual):
-+    for item in actual:
-+        if item not in expected:
-+            raise AssertionError("Given but not expected: {0}".format(item))
-+    for item in expected:
-+        if item not in actual:
-+            raise AssertionError("Expected but not given: {0}".format(item))
-+
-+
- class CommandSbdTest(TestCase):
-     def setUp(self):
-         self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
-@@ -234,7 +243,8 @@ class ValidateSbdOptionsTest(TestCase):
-             "SBD_STARTMODE": "clean",
-             "SBD_WATCHDOG_DEV": "/dev/watchdog",
-             "SBD_UNKNOWN": "",
--            "SBD_OPTS": "  "
-+            "SBD_OPTS": "  ",
-+            "SBD_PACEMAKER": "false",
-         }
- 
-         assert_report_item_list_equal(
-@@ -272,6 +282,90 @@ class ValidateSbdOptionsTest(TestCase):
-                         "allowed_str": self.allowed_sbd_options_str
-                     },
-                     None
-+                ),
-+                (
-+                    Severities.ERROR,
-+                    report_codes.INVALID_OPTION,
-+                    {
-+                        "option_name": "SBD_PACEMAKER",
-+                        "option_type": None,
-+                        "allowed": self.allowed_sbd_options,
-+                        "allowed_str": self.allowed_sbd_options_str
-+                    },
-+                    None
-+                )
-+            ]
-+        )
-+
-+    def test_watchdog_timeout_is_not_present(self):
-+        config = {
-+            "SBD_DELAY_START": "yes",
-+            "SBD_STARTMODE": "clean"
-+        }
-+        self.assertEqual([], cmd_sbd._validate_sbd_options(config))
-+
-+    def test_watchdog_timeout_is_nonnegative_int(self):
-+        config = {
-+            "SBD_WATCHDOG_TIMEOUT": "-1",
-+        }
-+
-+        assert_report_item_list_equal(
-+            cmd_sbd._validate_sbd_options(config),
-+            [
-+                (
-+                    Severities.ERROR,
-+                    report_codes.INVALID_OPTION_VALUE,
-+                    {
-+                        "option_name": "SBD_WATCHDOG_TIMEOUT",
-+                        "option_value": "-1",
-+                        "allowed_values": "nonnegative integer",
-+                        "allowed_values_str": "nonnegative integer",
-+                    },
-+                    None
-+                )
-+            ]
-+        )
-+
-+    def test_watchdog_timeout_is_not_int(self):
-+        config = {
-+            "SBD_WATCHDOG_TIMEOUT": "not int",
-+        }
-+
-+        assert_report_item_list_equal(
-+            cmd_sbd._validate_sbd_options(config),
-+            [
-+                (
-+                    Severities.ERROR,
-+                    report_codes.INVALID_OPTION_VALUE,
-+                    {
-+                        "option_name": "SBD_WATCHDOG_TIMEOUT",
-+                        "option_value": "not int",
-+                        "allowed_values": "nonnegative integer",
-+                        "allowed_values_str": "nonnegative integer",
-+                    },
-+                    None
-+                )
-+            ]
-+        )
-+
-+    def test_watchdog_timeout_is_none(self):
-+        config = {
-+            "SBD_WATCHDOG_TIMEOUT": None,
-+        }
-+
-+        assert_report_item_list_equal(
-+            cmd_sbd._validate_sbd_options(config),
-+            [
-+                (
-+                    Severities.ERROR,
-+                    report_codes.INVALID_OPTION_VALUE,
-+                    {
-+                        "option_name": "SBD_WATCHDOG_TIMEOUT",
-+                        "option_value": None,
-+                        "allowed_values": "nonnegative integer",
-+                        "allowed_values_str": "nonnegative integer",
-+                    },
-+                    None
-                 )
-             ]
-         )
-@@ -325,6 +419,35 @@ class GetFullWatchdogListTest(TestCase):
-             )
-         )
- 
-+    def test_invalid_watchdogs(self):
-+        watchdog_dict = {
-+            self.node_list[1].label: "",
-+            self.node_list[2].label: None,
-+            self.node_list[3].label: "not/abs/path",
-+            self.node_list[4].label: "/dev/watchdog"
-+
-+        }
-+        assert_raise_library_error(
-+            lambda: cmd_sbd._get_full_watchdog_list(
-+                self.node_list, "/dev/dog", watchdog_dict
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.WATCHDOG_INVALID,
-+                {"watchdog": ""}
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.WATCHDOG_INVALID,
-+                {"watchdog": None}
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.WATCHDOG_INVALID,
-+                {"watchdog": "not/abs/path"}
-+            )
-+        )
-+
- 
- @mock.patch("pcs.lib.commands.sbd._get_cluster_nodes")
- @mock.patch("pcs.lib.sbd.check_sbd")
-@@ -393,8 +516,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
-                 }
-             }
-         ]
--
--        self.assertEqual(
-+        _assert_equal_list_of_dictionaries_without_order(
-             expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
-         )
-         mock_get_nodes.assert_called_once_with(self.mock_env)
-@@ -447,7 +569,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
-             }
-         ]
- 
--        self.assertEqual(
-+        _assert_equal_list_of_dictionaries_without_order(
-             expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
-         )
-         mock_get_nodes.assert_called_once_with(self.mock_env)
-@@ -538,7 +660,7 @@ OPTION=   value
-             }
-         ]
- 
--        self.assertEqual(
-+        _assert_equal_list_of_dictionaries_without_order(
-             expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
-         )
-         mock_get_nodes.assert_called_once_with(self.mock_env)
-@@ -589,7 +711,7 @@ invalid value
-             }
-         ]
- 
--        self.assertEqual(
-+        _assert_equal_list_of_dictionaries_without_order(
-             expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
-         )
-         mock_get_nodes.assert_called_once_with(self.mock_env)
-diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
-index 4a35fd9..91f7b40 100644
---- a/pcs/test/test_lib_corosync_config_facade.py
-+++ b/pcs/test/test_lib_corosync_config_facade.py
-@@ -281,6 +281,34 @@ quorum {
-         self.assertFalse(facade.need_qdevice_reload)
- 
- 
-+class IsEnabledAutoTieBreaker(TestCase):
-+    def test_enabled(self):
-+        config = """\
-+quorum {
-+    auto_tie_breaker: 1
-+}
-+"""
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertTrue(facade.is_enabled_auto_tie_breaker())
-+
-+    def test_disabled(self):
-+        config = """\
-+quorum {
-+    auto_tie_breaker: 0
-+}
-+"""
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertFalse(facade.is_enabled_auto_tie_breaker())
-+
-+    def test_no_value(self):
-+        config = """\
-+quorum {
-+}
-+"""
-+        facade = lib.ConfigFacade.from_string(config)
-+        self.assertFalse(facade.is_enabled_auto_tie_breaker())
-+
-+
- class SetQuorumOptionsTest(TestCase):
-     def get_two_node(self, facade):
-         two_node = None
-diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
-index a4ec0f9..b0ffdbb 100644
---- a/pcs/test/test_lib_external.py
-+++ b/pcs/test/test_lib_external.py
-@@ -1012,12 +1012,14 @@ Copyright (c) 2006-2009 Red Hat, Inc.
- 
- 
- @mock.patch("pcs.lib.external.is_systemctl")
-+@mock.patch("pcs.lib.external.is_service_installed")
- class DisableServiceTest(TestCase):
-     def setUp(self):
-         self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
-         self.service = "service_name"
- 
--    def test_systemctl(self, mock_systemctl):
-+    def test_systemctl(self, mock_is_installed, mock_systemctl):
-+        mock_is_installed.return_value = True
-         mock_systemctl.return_value = True
-         self.mock_runner.run.return_value = ("", 0)
-         lib.disable_service(self.mock_runner, self.service)
-@@ -1025,7 +1027,8 @@ class DisableServiceTest(TestCase):
-             ["systemctl", "disable", self.service + ".service"]
-         )
- 
--    def test_systemctl_failed(self, mock_systemctl):
-+    def test_systemctl_failed(self, mock_is_installed, mock_systemctl):
-+        mock_is_installed.return_value = True
-         mock_systemctl.return_value = True
-         self.mock_runner.run.return_value = ("", 1)
-         self.assertRaises(
-@@ -1036,7 +1039,6 @@ class DisableServiceTest(TestCase):
-             ["systemctl", "disable", self.service + ".service"]
-         )
- 
--    @mock.patch("pcs.lib.external.is_service_installed")
-     def test_not_systemctl(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = False
-@@ -1046,7 +1048,6 @@ class DisableServiceTest(TestCase):
-             ["chkconfig", self.service, "off"]
-         )
- 
--    @mock.patch("pcs.lib.external.is_service_installed")
-     def test_not_systemctl_failed(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = False
-@@ -1059,7 +1060,14 @@ class DisableServiceTest(TestCase):
-             ["chkconfig", self.service, "off"]
-         )
- 
--    @mock.patch("pcs.lib.external.is_service_installed")
-+    def test_systemctl_not_installed(
-+            self, mock_is_installed, mock_systemctl
-+    ):
-+        mock_is_installed.return_value = False
-+        mock_systemctl.return_value = True
-+        lib.disable_service(self.mock_runner, self.service)
-+        self.assertEqual(self.mock_runner.run.call_count, 0)
-+
-     def test_not_systemctl_not_installed(
-             self, mock_is_installed, mock_systemctl
-     ):
-@@ -1068,7 +1076,8 @@ class DisableServiceTest(TestCase):
-         lib.disable_service(self.mock_runner, self.service)
-         self.assertEqual(self.mock_runner.run.call_count, 0)
- 
--    def test_instance_systemctl(self, mock_systemctl):
-+    def test_instance_systemctl(self, mock_is_installed, mock_systemctl):
-+        mock_is_installed.return_value = True
-         mock_systemctl.return_value = True
-         self.mock_runner.run.return_value = ("", 0)
-         lib.disable_service(self.mock_runner, self.service, instance="test")
-@@ -1078,7 +1087,6 @@ class DisableServiceTest(TestCase):
-             "{0}@{1}.service".format(self.service, "test")
-         ])
- 
--    @mock.patch("pcs.lib.external.is_service_installed")
-     def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl):
-         mock_is_installed.return_value = True
-         mock_systemctl.return_value = False
-diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
-index e3c1401..fd29484 100644
---- a/pcs/test/test_lib_sbd.py
-+++ b/pcs/test/test_lib_sbd.py
-@@ -28,6 +28,7 @@ from pcs.lib.external import (
-     NodeConnectionException,
- )
- import pcs.lib.sbd as lib_sbd
-+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
- 
- 
- class TestException(Exception):
-@@ -85,6 +86,246 @@ class RunParallelAndRaiseLibErrorOnFailureTest(TestCase):
-         )
- 
- 
-+@mock.patch("pcs.lib.sbd.is_sbd_installed")
-+@mock.patch("pcs.lib.sbd.is_sbd_enabled")
-+class IsAutoTieBreakerNeededTest(TestCase):
-+    def setUp(self):
-+        self.runner = "runner"
-+        self.mock_corosync_conf = mock.MagicMock(spec_set=CorosyncConfigFacade)
-+
-+    def _set_ret_vals(self, nodes, qdevice):
-+        self.mock_corosync_conf.get_nodes.return_value = nodes
-+        self.mock_corosync_conf.has_quorum_device.return_value = qdevice
-+
-+    def test_sbd_enabled_even_nodes_has_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2], True)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_enabled_even_nodes_no_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2], False)
-+        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_not_installed_even_nodes_no_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = False
-+        mock_installed.return_value = False
-+        self._set_ret_vals([1, 2], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_enabled_odd_nodes_has_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], True)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_enabled_odd_nodes_no_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_disabled_even_nodes_has_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = False
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2], True)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_disabled_even_nodes_no_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = False
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_disabled_odd_nodes_has_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = False
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], True)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_disabled_odd_nodes_no_qdevice(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = False
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf
-+        ))
-+
-+    def test_sbd_enabled_odd_nodes_no_qdevice_plus_node(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, 1
-+        ))
-+
-+    def test_sbd_not_installed_odd_nodes_no_qdevice_plus_node(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = False
-+        mock_installed.return_value = False
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, 1
-+        ))
-+
-+    def test_sbd_enabled_odd_nodes_no_qdevice_minus_node(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, -1
-+        ))
-+
-+    def test_sbd_enabled_odd_nodes_no_qdevice_plus_2_nodes(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, 2
-+        ))
-+
-+    def test_sbd_enabled_odd_nodes_no_qdevice_minus_2_nodes(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, -2
-+        ))
-+
-+    def test_sbd_enabled_even_nodes_no_qdevice_plus_node(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, 1
-+        ))
-+
-+    def test_sbd_enabled_even_nodes_no_qdevice_minus_node(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2], False)
-+        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, -1
-+        ))
-+
-+    def test_sbd_enabled_even_nodes_no_qdevice_plus_2_nodes(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2], False)
-+        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, 2
-+        ))
-+
-+    def test_sbd_enabled_even_nodes_no_qdevice_minus_2_nodes(
-+        self, mock_enabled, mock_installed
-+    ):
-+        mock_enabled.return_value = True
-+        mock_installed.return_value = True
-+        self._set_ret_vals([1, 2, 3, 4], False)
-+        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
-+            self.runner, self.mock_corosync_conf, -2
-+        ))
-+
-+
-+@mock.patch("pcs.lib.sbd.is_auto_tie_breaker_needed")
-+class AtbHasToBeEnabledTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = "runner"
-+        self.mock_conf = mock.MagicMock(spec_set=CorosyncConfigFacade)
-+
-+    def test_atb_needed_is_enabled(self, mock_is_needed):
-+        mock_is_needed.return_value = True
-+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = True
-+        self.assertFalse(lib_sbd.atb_has_to_be_enabled(
-+            self.mock_runner, self.mock_conf, 1
-+        ))
-+        mock_is_needed.assert_called_once_with(
-+            self.mock_runner, self.mock_conf, 1
-+        )
-+
-+    def test_atb_needed_is_disabled(self, mock_is_needed):
-+        mock_is_needed.return_value = True
-+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = False
-+        self.assertTrue(lib_sbd.atb_has_to_be_enabled(
-+            self.mock_runner, self.mock_conf, -1
-+        ))
-+        mock_is_needed.assert_called_once_with(
-+            self.mock_runner, self.mock_conf, -1
-+        )
-+
-+    def test_atb_not_needed_is_enabled(self, mock_is_needed):
-+        mock_is_needed.return_value = False
-+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = True
-+        self.assertFalse(lib_sbd.atb_has_to_be_enabled(
-+            self.mock_runner, self.mock_conf, 2
-+        ))
-+        mock_is_needed.assert_called_once_with(
-+            self.mock_runner, self.mock_conf, 2
-+        )
-+
-+    def test_atb_not_needed_is_disabled(self, mock_is_needed):
-+        mock_is_needed.return_value = False
-+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = False
-+        self.assertFalse(lib_sbd.atb_has_to_be_enabled(
-+            self.mock_runner, self.mock_conf, -2
-+        ))
-+        mock_is_needed.assert_called_once_with(
-+            self.mock_runner, self.mock_conf, -2
-+        )
-+
-+
-+
- class CheckSbdTest(TestCase):
-     def test_success(self):
-         mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-@@ -316,11 +557,11 @@ class SetSbdConfigOnNodeTest(TestCase):
-         }
-         cfg_out = """# This file has been generated by pcs.
- SBD_OPTS="-n node1"
--SBD_WATCHDOG_DEV=/dev/watchdog
-+SBD_WATCHDOG_DEV=/my/watchdog
- SBD_WATCHDOG_TIMEOUT=0
- """
-         lib_sbd.set_sbd_config_on_node(
--            self.mock_rep, self.mock_com, self.node, cfg_in
-+            self.mock_rep, self.mock_com, self.node, cfg_in, "/my/watchdog"
-         )
-         mock_set_sbd_cfg.assert_called_once_with(
-             self.mock_com, self.node, cfg_out
-@@ -340,17 +581,24 @@ class SetSbdConfigOnAllNodesTest(TestCase):
-     def test_success(self, mock_func):
-         mock_com = mock.MagicMock(spec_set=NodeCommunicator)
-         mock_rep = MockLibraryReportProcessor()
--        node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
-+        watchdog_dict = dict([
-+            (NodeAddresses("node" + str(i)), "/dev/watchdog" + str(i))
-+            for i in range(5)
-+        ])
-+        node_list = list(watchdog_dict.keys())
-         config = {
-             "opt1": "val1",
-             "opt2": "val2"
-         }
-         lib_sbd.set_sbd_config_on_all_nodes(
--            mock_rep, mock_com, node_list, config
-+            mock_rep, mock_com, node_list, config, watchdog_dict
-         )
-         mock_func.assert_called_once_with(
-             lib_sbd.set_sbd_config_on_node,
--            [([mock_rep, mock_com, node, config], {}) for node in node_list]
-+            [
-+                ([mock_rep, mock_com, node, config, watchdog_dict[node]], {})
-+                for node in node_list
-+            ]
-         )
- 
- 
-@@ -594,3 +842,17 @@ class IsSbdEnabledTest(TestCase):
-         mock_obj = mock.MagicMock()
-         mock_is_service_enabled.return_value = True
-         self.assertTrue(lib_sbd.is_sbd_enabled(mock_obj))
-+
-+
-+@mock.patch("pcs.lib.external.is_service_installed")
-+class IsSbdInstalledTest(TestCase):
-+    def test_installed(self, mock_is_service_installed):
-+        mock_obj = mock.MagicMock()
-+        mock_is_service_installed.return_value = True
-+        self.assertTrue(lib_sbd.is_sbd_installed(mock_obj))
-+
-+    def test_not_installed(self, mock_is_service_installed):
-+        mock_obj = mock.MagicMock()
-+        mock_is_service_installed.return_value = False
-+        self.assertFalse(lib_sbd.is_sbd_installed(mock_obj))
-+
-diff --git a/pcs/usage.py b/pcs/usage.py
-index b11a5fa..9ebbca9 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -683,6 +683,7 @@ Commands:
-         the whole CIB or be warned in the case of outdated CIB.
- 
-     node add <node[,node-altaddr]> [--start [--wait[=<n>]]] [--enable]
-+            [--watchdog=<watchdog-path>]
-         Add the node to corosync.conf and corosync on all nodes in the cluster
-         and sync the new corosync.conf to the new node.  If --start is
-         specified also start corosync/pacemaker on the new node, if --wait is
-@@ -690,6 +691,8 @@ Commands:
-         is specified enable corosync/pacemaker on new node.
-         When using Redundant Ring Protocol (RRP) with udpu transport, specify
-         the ring 0 address first followed by a ',' and then the ring 1 address.
-+        Use --watchdog to specify path to watchdog on newly added node, when SBD
-+        is enabled in cluster.
- 
-     node remove <node>
-         Shutdown specified node and remove it from pacemaker and corosync on
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 53cc0b0..a7ff7ca 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -32,7 +32,7 @@ from pcs.cli.common.reports import (
-     LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
- )
- from pcs.common.tools import simple_cache
--from pcs.lib import reports
-+from pcs.lib import reports, sbd
- from pcs.lib.env import LibraryEnvironment
- from pcs.lib.errors import LibraryError
- from pcs.lib.external import (
-@@ -574,6 +574,23 @@ def getCorosyncActiveNodes():
- 
-     return nodes_active
- 
-+
-+def _enable_auto_tie_breaker_for_sbd(corosync_conf):
-+    """
-+    Enable auto tie breaker in specified corosync conf if it is needed by SBD.
-+
-+    corosync_conf -- parsed corosync conf
-+    """
-+    try:
-+        corosync_facade = corosync_conf_facade(corosync_conf)
-+        if sbd.atb_has_to_be_enabled(cmd_runner(), corosync_facade):
-+            corosync_facade.set_quorum_options(
-+                get_report_processor(), {"auto_tie_breaker": "1"}
-+            )
-+    except LibraryError as e:
-+        process_library_reports(e.args)
-+
-+
- # Add node specified to corosync.conf and reload corosync.conf (if running)
- def addNodeToCorosync(node):
- # Before adding, make sure node isn't already in corosync.conf
-@@ -600,6 +617,9 @@ def addNodeToCorosync(node):
-         new_node.add_attribute("ring1_addr", node1)
-     new_node.add_attribute("nodeid", new_nodeid)
- 
-+    # enable ATB if it's needed
-+    _enable_auto_tie_breaker_for_sbd(corosync_conf)
-+
-     corosync_conf = autoset_2node_corosync(corosync_conf)
-     setCorosyncConf(str(corosync_conf))
-     return True
-@@ -667,6 +687,9 @@ def removeNodeFromCorosync(node):
-                     removed_node = True
- 
-     if removed_node:
-+        # enable ATB if it's needed
-+        _enable_auto_tie_breaker_for_sbd(corosync_conf)
-+
-         corosync_conf = autoset_2node_corosync(corosync_conf)
-         setCorosyncConf(str(corosync_conf))
- 
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index d46cd62..137bb3d 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -1995,14 +1995,14 @@ def enable_service(service)
- end
- 
- def disable_service(service)
-+  # fails when the service is not installed, so we need to check it beforehand
-+  if not is_service_installed?(service)
-+    return true
-+  end
-+
-   if ISSYSTEMCTL
--    # returns success even if the service is not installed
-     cmd = ['systemctl', 'disable', "#{service}.service"]
-   else
--    if not is_service_installed?(service)
--      return true
--    end
--    # fails when the service is not installed, so we need to check it beforehand
-     cmd = ['chkconfig', service, 'off']
-   end
-   _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch b/SOURCES/bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch
deleted file mode 100644
index 41f8110..0000000
--- a/SOURCES/bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch
+++ /dev/null
@@ -1,354 +0,0 @@
-From 66b5e393aebd84b08047f33d09bc4cbce730e205 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Tue, 23 Aug 2016 11:19:20 +0200
-Subject: [PATCH] sbd: fix check if ATB is required when enabling sbd
-
----
- pcs/common/report_codes.py |   1 +
- pcs/lib/commands/sbd.py    |   3 +-
- pcs/lib/reports.py         |  12 +++
- pcs/lib/sbd.py             |  39 ++++++++-
- pcs/test/test_lib_sbd.py   | 193 +++++++--------------------------------------
- 5 files changed, 80 insertions(+), 168 deletions(-)
-
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index 5e46a1f..e6a86ec 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -155,6 +155,7 @@ SBD_DISABLING_STARTED = "SBD_DISABLING_STARTED"
- SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED"
- SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED"
- SBD_NOT_ENABLED = "SBD_NOT_ENABLED"
-+SBD_REQUIRES_ATB = "SBD_REQUIRES_ATB"
- SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR"
- SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED"
- SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS"
-diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
-index 265ebb5..2acb104 100644
---- a/pcs/lib/commands/sbd.py
-+++ b/pcs/lib/commands/sbd.py
-@@ -159,7 +159,8 @@ def enable_sbd(
- 
-     # enable ATB if needed
-     corosync_conf = lib_env.get_corosync_conf()
--    if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), corosync_conf):
-+    if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
-+        lib_env.report_processor.process(reports.sbd_requires_atb())
-         corosync_conf.set_quorum_options(
-             lib_env.report_processor, {"auto_tie_breaker": "1"}
-         )
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index 568bb7e..a701679 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -1928,3 +1928,15 @@ def quorum_cannot_disable_atb_due_to_sbd(
-         "unable to disable auto_tie_breaker: SBD fencing will have no effect",
-         forceable=forceable
-     )
-+
-+
-+def sbd_requires_atb():
-+    """
-+    Warning that ATB will be enabled in order to make SBD fencing effective.
-+    """
-+    return ReportItem.warning(
-+        report_codes.SBD_REQUIRES_ATB,
-+        "auto_tie_breaker quorum option will be enabled to make SBD fencing "
-+        "effective. Cluster has to be offline to be able to make this change."
-+    )
-+
-diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
-index c9f013b..39de740 100644
---- a/pcs/lib/sbd.py
-+++ b/pcs/lib/sbd.py
-@@ -46,6 +46,25 @@ def _run_parallel_and_raise_lib_error_on_failure(func, param_list):
-         raise LibraryError(*report_list)
- 
- 
-+def _even_number_of_nodes_and_no_qdevice(
-+    corosync_conf_facade, node_number_modifier=0
-+):
-+    """
-+    Returns True whenever cluster has no quorum device configured and number of
-+    nodes + node_number_modifier is even number, False otherwise.
-+
-+    corosync_conf_facade --
-+    node_number_modifier -- this value will be added to current number of nodes.
-+        This can be useful to test whenever is ATB needed when adding/removing
-+        node.
-+    """
-+    return (
-+        not corosync_conf_facade.has_quorum_device()
-+        and
-+        (len(corosync_conf_facade.get_nodes()) + node_number_modifier) % 2 == 0
-+    )
-+
-+
- def is_auto_tie_breaker_needed(
-     runner, corosync_conf_facade, node_number_modifier=0
- ):
-@@ -60,15 +79,29 @@ def is_auto_tie_breaker_needed(
-         node.
-     """
-     return (
--        not corosync_conf_facade.has_quorum_device()
--        and
--        (len(corosync_conf_facade.get_nodes()) + node_number_modifier) % 2 == 0
-+        _even_number_of_nodes_and_no_qdevice(
-+            corosync_conf_facade, node_number_modifier
-+        )
-         and
-         is_sbd_installed(runner)
-         and
-         is_sbd_enabled(runner)
-     )
- 
-+
-+def atb_has_to_be_enabled_pre_enable_check(corosync_conf_facade):
-+    """
-+    Returns True whenever quorum option auto_tie_breaker is needed to be enabled
-+    for proper working of SBD fencing. False if it is not needed. This function
-+    doesn't check if sbd is installed nor enabled.
-+     """
-+    return (
-+        not corosync_conf_facade.is_enabled_auto_tie_breaker()
-+        and
-+        _even_number_of_nodes_and_no_qdevice(corosync_conf_facade)
-+    )
-+
-+
- def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0):
-     """
-     Return True whenever quorum option auto tie breaker has to be enabled for
-diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
-index fd29484..516e0bd 100644
---- a/pcs/test/test_lib_sbd.py
-+++ b/pcs/test/test_lib_sbd.py
-@@ -86,195 +86,60 @@ class RunParallelAndRaiseLibErrorOnFailureTest(TestCase):
-         )
- 
- 
--@mock.patch("pcs.lib.sbd.is_sbd_installed")
--@mock.patch("pcs.lib.sbd.is_sbd_enabled")
--class IsAutoTieBreakerNeededTest(TestCase):
-+class EvenNumberOfNodesAndNoQdevice(TestCase):
-     def setUp(self):
--        self.runner = "runner"
-         self.mock_corosync_conf = mock.MagicMock(spec_set=CorosyncConfigFacade)
- 
-     def _set_ret_vals(self, nodes, qdevice):
-         self.mock_corosync_conf.get_nodes.return_value = nodes
-         self.mock_corosync_conf.has_quorum_device.return_value = qdevice
- 
--    def test_sbd_enabled_even_nodes_has_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2], True)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
--        ))
--
--    def test_sbd_enabled_even_nodes_no_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2], False)
--        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
--        ))
--
--    def test_sbd_not_installed_even_nodes_no_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = False
--        mock_installed.return_value = False
-+    def test_even_num_no_qdevice(self):
-         self._set_ret_vals([1, 2], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
--        ))
--
--    def test_sbd_enabled_odd_nodes_has_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3], True)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
--        ))
--
--    def test_sbd_enabled_odd_nodes_no_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
-+        self.assertTrue(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf
-         ))
- 
--    def test_sbd_disabled_even_nodes_has_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = False
--        mock_installed.return_value = True
-+    def test_even_num_qdevice(self):
-         self._set_ret_vals([1, 2], True)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
--        ))
--
--    def test_sbd_disabled_even_nodes_no_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = False
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
-+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf
-         ))
- 
--    def test_sbd_disabled_odd_nodes_has_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = False
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3], True)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
--        ))
--
--    def test_sbd_disabled_odd_nodes_no_qdevice(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = False
--        mock_installed.return_value = True
-+    def test_odd_num_no_qdevice(self):
-         self._set_ret_vals([1, 2, 3], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf
-+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf
-         ))
- 
--    def test_sbd_enabled_odd_nodes_no_qdevice_plus_node(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3], False)
--        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, 1
--        ))
--
--    def test_sbd_not_installed_odd_nodes_no_qdevice_plus_node(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = False
--        mock_installed.return_value = False
--        self._set_ret_vals([1, 2, 3], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, 1
--        ))
--
--    def test_sbd_enabled_odd_nodes_no_qdevice_minus_node(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3], False)
--        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, -1
--        ))
--
--    def test_sbd_enabled_odd_nodes_no_qdevice_plus_2_nodes(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, 2
--        ))
--
--    def test_sbd_enabled_odd_nodes_no_qdevice_minus_2_nodes(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, -2
-+    def test_odd_num_qdevice(self):
-+        self._set_ret_vals([1, 2, 3], True)
-+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf
-         ))
- 
--    def test_sbd_enabled_even_nodes_no_qdevice_plus_node(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
-+    def test_even_num_no_qdevice_plus_one(self):
-         self._set_ret_vals([1, 2], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, 1
-+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf, 1
-         ))
- 
--    def test_sbd_enabled_even_nodes_no_qdevice_minus_node(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2], False)
--        self.assertFalse(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, -1
-+    def test_even_num_qdevice_plus_one(self):
-+        self._set_ret_vals([1, 2], True)
-+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf, 1
-         ))
- 
--    def test_sbd_enabled_even_nodes_no_qdevice_plus_2_nodes(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2], False)
--        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, 2
-+    def test_odd_num_no_qdevice_plus_one(self):
-+        self._set_ret_vals([1, 2, 3], False)
-+        self.assertTrue(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf, 1
-         ))
- 
--    def test_sbd_enabled_even_nodes_no_qdevice_minus_2_nodes(
--        self, mock_enabled, mock_installed
--    ):
--        mock_enabled.return_value = True
--        mock_installed.return_value = True
--        self._set_ret_vals([1, 2, 3, 4], False)
--        self.assertTrue(lib_sbd.is_auto_tie_breaker_needed(
--            self.runner, self.mock_corosync_conf, -2
-+    def test_odd_num_qdevice_plus_one(self):
-+        self._set_ret_vals([1, 2, 3], True)
-+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
-+            self.mock_corosync_conf, 1
-         ))
- 
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1165821-01-pcs-CLI-GUI-should-be-capable-of.patch b/SOURCES/bz1165821-01-pcs-CLI-GUI-should-be-capable-of.patch
new file mode 100644
index 0000000..3cf3446
--- /dev/null
+++ b/SOURCES/bz1165821-01-pcs-CLI-GUI-should-be-capable-of.patch
@@ -0,0 +1,240 @@
+From 75488b2abdedb58715a21e365573a64e4ab1c324 Mon Sep 17 00:00:00 2001
+From: Ondrej Mular <omular@redhat.com>
+Date: Tue, 30 May 2017 16:47:55 +0200
+Subject: [PATCH] squash bz1165821 pcs CLI/GUI should be capable of
+
+e60e02d store binary data in the corosync authkey file
+
+bf45303 cli: add option --no-hardened to 'cluster setup'
+
+97dff2f web UI: add option to create not hardened cluster
+---
+ pcs/cli/common/parse_args.py |  2 +-
+ pcs/cluster.py               | 23 ++++++++++++++++-------
+ pcs/lib/tools.py             |  5 ++++-
+ pcs/pcs.8                    |  4 +++-
+ pcs/usage.py                 |  3 +++
+ pcs/utils.py                 |  1 +
+ pcsd/pcs.rb                  |  1 +
+ pcsd/pcsd.rb                 |  3 ++-
+ pcsd/remote.rb               |  3 +++
+ pcsd/views/manage.erb        |  9 +++++++++
+ 10 files changed, 43 insertions(+), 11 deletions(-)
+
+diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
+index e2250c7..5b87fbc 100644
+--- a/pcs/cli/common/parse_args.py
++++ b/pcs/cli/common/parse_args.py
+@@ -32,7 +32,7 @@ PCS_LONG_OPTIONS = [
+     "miss_count_const=", "fail_recv_const=",
+     "corosync_conf=", "cluster_conf=",
+     "booth-conf=", "booth-key=",
+-    "remote", "watchdog=", "device=",
++    "remote", "watchdog=", "device=", "no-hardened",
+     #in pcs status - do not display resorce status on inactive node
+     "hide-inactive",
+     # pcs resource (un)manage - enable or disable monitor operations
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index 0fc5e2c..0a9289b 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -70,7 +70,11 @@ from pcs.lib.node import NodeAddresses, NodeAddressesList
+ from pcs.lib.nodes_task import check_corosync_offline_on_nodes, distribute_files
+ from pcs.lib import node_communication_format
+ import pcs.lib.pacemaker.live as lib_pacemaker
+-from pcs.lib.tools import environment_file_to_dict, generate_key
++from pcs.lib.tools import (
++    environment_file_to_dict,
++    generate_binary_key,
++    generate_key,
++)
+ 
+ def cluster_cmd(argv):
+     if len(argv) == 0:
+@@ -381,7 +385,8 @@ def cluster_setup(argv):
+             node_list,
+             options["transport_options"],
+             options["totem_options"],
+-            options["quorum_options"]
++            options["quorum_options"],
++            modifiers["hardened"]
+         )
+     process_library_reports(messages)
+ 
+@@ -453,11 +458,12 @@ def cluster_setup(argv):
+             file_definitions.update(
+                 node_communication_format.pcmk_authkey_file(generate_key())
+             )
+-            file_definitions.update(
+-                node_communication_format.corosync_authkey_file(
+-                    generate_key(random_bytes_count=128)
++            if modifiers["hardened"]:
++                file_definitions.update(
++                    node_communication_format.corosync_authkey_file(
++                        generate_binary_key(random_bytes_count=128)
++                    )
+                 )
+-            )
+ 
+             distribute_files(
+                 lib_env.node_communicator(),
+@@ -736,7 +742,8 @@ def cluster_setup_parse_options_cman(options, force=False):
+     return parsed, messages
+ 
+ def cluster_setup_create_corosync_conf(
+-    cluster_name, node_list, transport_options, totem_options, quorum_options
++    cluster_name, node_list, transport_options, totem_options, quorum_options,
++    is_hardened
+ ):
+     messages = []
+ 
+@@ -752,6 +759,8 @@ def cluster_setup_create_corosync_conf(
+ 
+     totem_section.add_attribute("version", "2")
+     totem_section.add_attribute("cluster_name", cluster_name)
++    if not is_hardened:
++        totem_section.add_attribute("secauth", "off")
+ 
+     transport_options_names = (
+         "transport",
+diff --git a/pcs/lib/tools.py b/pcs/lib/tools.py
+index cd2d7f9..b9d7505 100644
+--- a/pcs/lib/tools.py
++++ b/pcs/lib/tools.py
+@@ -9,7 +9,10 @@ import os
+ 
+ 
+ def generate_key(random_bytes_count=32):
+-    return binascii.hexlify(os.urandom(random_bytes_count))
++    return binascii.hexlify(generate_binary_key(random_bytes_count))
++
++def generate_binary_key(random_bytes_count):
++    return os.urandom(random_bytes_count)
+ 
+ def environment_file_to_dict(config):
+     """
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index 4edfc72..aee8b3a 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -205,7 +205,7 @@ Add specified utilization options to specified resource. If resource is not spec
+ auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\fB\-\-local\fR]
+ Authenticate pcs to pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re\-authentication to occur.
+ .TP
+-setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>]
++setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>] [\fB\-\-no\-hardened\fR]
+ Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active' is not recommended). The \fB\-\-wait_for_all\fR, \fB\-\-auto_tie_breaker\fR, \fB\-\-last_man_standing\fR, \fB\-\-last_man_standing_window\fR options are all documented in corosync's votequorum(5) man page. These options are not supported on CMAN clusters.
+ 
+ \fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4).  This option is not supported on CMAN clusters.
+@@ -222,6 +222,8 @@ Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR w
+ 
+ \fB\-\-fail_recv_const\fR <failures> specifies how many rotations of the token without receiving any messages when messages should be received may occur before a new configuration is formed (default 2500 failures)
+ 
++If \fB\-\-no\-hardened\fR is specified, the cluster will be set up in way that all corosync communication will be encrypted.
++
+ 
+ Configuring Redundant Ring Protocol (RRP)
+ 
+diff --git a/pcs/usage.py b/pcs/usage.py
+index c73a103..c1ab00f 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -576,6 +576,7 @@ Commands:
+             [--ipv6] [--token <timeout>] [--token_coefficient <timeout>]
+             [--join <timeout>] [--consensus <timeout>]
+             [--miss_count_const <count>] [--fail_recv_const <failures>]
++            [--no-hardened]
+         Configure corosync and sync configuration out to listed nodes.
+         --local will only perform changes on the local node,
+         --start will also start the cluster on the specified nodes,
+@@ -611,6 +612,8 @@ Commands:
+             without receiving any messages when messages should be received
+             may occur before a new configuration is formed
+             (default 2500 failures)
++        If --no-hardened is specified, the cluster will be set up in way that all
++            corosync communication will be encrypted.
+ 
+         Configuring Redundant Ring Protocol (RRP)
+ 
+diff --git a/pcs/utils.py b/pcs/utils.py
+index 6515e5f..eec832f 100644
+--- a/pcs/utils.py
++++ b/pcs/utils.py
+@@ -2882,6 +2882,7 @@ def get_modificators():
+         "force": "--force" in pcs_options,
+         "full": "--full" in pcs_options,
+         "group": pcs_options.get("--group", None),
++        "hardened": "--no-hardened" not in pcs_options,
+         "monitor": "--monitor" in pcs_options,
+         "name": pcs_options.get("--name", None),
+         "no-default-ops": "--no-default-ops" in pcs_options,
+diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
+index 9764a43..878296b 100644
+--- a/pcsd/pcs.rb
++++ b/pcsd/pcs.rb
+@@ -1835,6 +1835,7 @@ def get_node_status(auth_user, cib_dom)
+         'moving_resource_in_group',
+         'unmanaged_resource',
+         'alerts',
++        'hardened_cluster',
+       ]
+   }
+ 
+diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
+index 33d999d..4d1964d 100644
+--- a/pcsd/pcsd.rb
++++ b/pcsd/pcsd.rb
+@@ -568,7 +568,8 @@ already been added to pcsd.  You may not add two clusters with the same name int
+       {
+         :clustername => @cluster_name,
+         :nodes => @nodes_rrp.join(';'),
+-        :options => options.to_json
++        :options => options.to_json,
++        :no_hardened => params[:no_hardened],
+       },
+       true,
+       nil,
+diff --git a/pcsd/remote.rb b/pcsd/remote.rb
+index f353980..e37abb7 100644
+--- a/pcsd/remote.rb
++++ b/pcsd/remote.rb
+@@ -964,6 +964,9 @@ def setup_cluster(params, request, auth_user)
+   end
+   nodes_options = nodes + options
+   nodes_options += options_udp if transport_udp
++  if params[:no_hardened] == "1"
++      nodes_options << "--no-hardened"
++  end
+   stdout, stderr, retval = run_cmd(
+     auth_user, PCS, "cluster", "setup", "--enable", "--start", "--async",
+     "--name",  params[:clustername], *nodes_options
+diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
+index 39ab41f..a055449 100644
+--- a/pcsd/views/manage.erb
++++ b/pcsd/views/manage.erb
+@@ -222,6 +222,9 @@
+       <table>
+ 	<% transport_desc = "\
+ Enables either udpu (unicast) or udp (multicast) cluster communication (default: udpu)"%>
++	<% hardened_desc = "\
++Create cluster with encrypted corosync communication. This option may not work \
++with pcs version lower than 0.9.159." %>
+ 	<% wait_for_all_desc = "\
+ Enables Wait For All (WFA) feature (default: off).
+ 
+@@ -345,6 +348,12 @@ Specify ring 1 address for each node if you want to use RRP." %>
+             </select>
+           </td>
+         </tr>
++        <tr title="<%= h(hardened_desc) %>"><td align=right>Hardened:</td>
++          <td>
++            <label><input type="radio" name="no_hardened" value="0" checked="checked">Yes</label>
++            <label><input type="radio" name="no_hardened" value="1">No</label>
++          </td>
++        </tr>
+ 	<tr title="<%= h(wait_for_all_desc) %>"><td align=right>Wait for All:</td><td><input type=checkbox name="config-wait_for_all"></td></tr>
+ 	<tr title="<%= h(auto_tie_desc) %>"><td align=right>Auto Tie Breaker:</td><td><input type=checkbox name="config-auto_tie_breaker"></td></tr>
+ 	<tr title="<%= h(last_man_desc) %>"><td align=right>Last Man Standing:</td><td><input type=checkbox name="config-last_man_standing"></td></tr>
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1165821-02-pcs-CLI-GUI-should-be-capable-of.patch b/SOURCES/bz1165821-02-pcs-CLI-GUI-should-be-capable-of.patch
new file mode 100644
index 0000000..9f5d2d3
--- /dev/null
+++ b/SOURCES/bz1165821-02-pcs-CLI-GUI-should-be-capable-of.patch
@@ -0,0 +1,404 @@
+From 0049f2b67b084006244f73a9a94979ba524a3bdd Mon Sep 17 00:00:00 2001
+From: Ondrej Mular <omular@redhat.com>
+Date: Mon, 5 Jun 2017 10:14:16 +0200
+Subject: [PATCH] squash bz1165821  pcs CLI/GUI should be capable of
+
+ab3b909 change flag for hardened cluster to --encryption
+
+setup cluster wo corosync encryption by default
+---
+ pcs/cli/common/parse_args.py |  2 +-
+ pcs/cluster.py               | 12 +++++++-----
+ pcs/pcs.8                    |  4 ++--
+ pcs/test/test_cluster.py     | 24 ++++++++++++++++++++++++
+ pcs/usage.py                 |  6 +++---
+ pcs/utils.py                 |  2 +-
+ pcsd/pcsd.rb                 |  2 +-
+ pcsd/remote.rb               |  4 ++--
+ pcsd/views/manage.erb        | 11 +++++++----
+ 9 files changed, 48 insertions(+), 19 deletions(-)
+
+diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
+index 5b87fbc..d72a6d4 100644
+--- a/pcs/cli/common/parse_args.py
++++ b/pcs/cli/common/parse_args.py
+@@ -32,7 +32,7 @@ PCS_LONG_OPTIONS = [
+     "miss_count_const=", "fail_recv_const=",
+     "corosync_conf=", "cluster_conf=",
+     "booth-conf=", "booth-key=",
+-    "remote", "watchdog=", "device=", "no-hardened",
++    "remote", "watchdog=", "device=", "encryption=",
+     #in pcs status - do not display resorce status on inactive node
+     "hide-inactive",
+     # pcs resource (un)manage - enable or disable monitor operations
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index 0a9289b..d896b0c 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -303,6 +303,8 @@ def cluster_certkey(argv):
+ 
+ def cluster_setup(argv):
+     modifiers = utils.get_modificators()
++    if modifiers["encryption"] not in ["0", "1"]:
++        utils.err("Invalid value for option --encryption")
+     if len(argv) < 2:
+         usage.cluster(["setup"])
+         sys.exit(1)
+@@ -386,7 +388,7 @@ def cluster_setup(argv):
+             options["transport_options"],
+             options["totem_options"],
+             options["quorum_options"],
+-            modifiers["hardened"]
++            modifiers["encryption"] == "1"
+         )
+     process_library_reports(messages)
+ 
+@@ -458,7 +460,7 @@ def cluster_setup(argv):
+             file_definitions.update(
+                 node_communication_format.pcmk_authkey_file(generate_key())
+             )
+-            if modifiers["hardened"]:
++            if modifiers["encryption"] == "1":
+                 file_definitions.update(
+                     node_communication_format.corosync_authkey_file(
+                         generate_binary_key(random_bytes_count=128)
+@@ -743,7 +745,7 @@ def cluster_setup_parse_options_cman(options, force=False):
+ 
+ def cluster_setup_create_corosync_conf(
+     cluster_name, node_list, transport_options, totem_options, quorum_options,
+-    is_hardened
++    encrypted
+ ):
+     messages = []
+ 
+@@ -758,9 +760,9 @@ def cluster_setup_create_corosync_conf(
+     corosync_conf.add_section(logging_section)
+ 
+     totem_section.add_attribute("version", "2")
+-    totem_section.add_attribute("cluster_name", cluster_name)
+-    if not is_hardened:
++    if not encrypted:
+         totem_section.add_attribute("secauth", "off")
++    totem_section.add_attribute("cluster_name", cluster_name)
+ 
+     transport_options_names = (
+         "transport",
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index aee8b3a..446e7b3 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -205,7 +205,7 @@ Add specified utilization options to specified resource. If resource is not spec
+ auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\fB\-\-local\fR]
+ Authenticate pcs to pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re\-authentication to occur.
+ .TP
+-setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>] [\fB\-\-no\-hardened\fR]
++setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>] [\fB\-\-encryption\fR 0|1]
+ Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active' is not recommended). The \fB\-\-wait_for_all\fR, \fB\-\-auto_tie_breaker\fR, \fB\-\-last_man_standing\fR, \fB\-\-last_man_standing_window\fR options are all documented in corosync's votequorum(5) man page. These options are not supported on CMAN clusters.
+ 
+ \fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4).  This option is not supported on CMAN clusters.
+@@ -222,7 +222,7 @@ Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR w
+ 
+ \fB\-\-fail_recv_const\fR <failures> specifies how many rotations of the token without receiving any messages when messages should be received may occur before a new configuration is formed (default 2500 failures)
+ 
+-If \fB\-\-no\-hardened\fR is specified, the cluster will be set up in way that all corosync communication will be encrypted.
++\fB\-\-encryption\fR 0|1 disables (0) or enables (1) corosync communication encryption (default 0)
+ 
+ 
+ Configuring Redundant Ring Protocol (RRP)
+diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
+index 2b7fd5a..5c7a4a1 100644
+--- a/pcs/test/test_cluster.py
++++ b/pcs/test/test_cluster.py
+@@ -232,6 +232,7 @@ Warning: Unable to resolve hostname: nonexistant-address.invalid
+         corosync_conf = """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -290,6 +291,7 @@ Error: {0} already exists, use --force to overwrite
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -436,6 +438,7 @@ Error: {0} already exists, use --force to overwrite
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -476,6 +479,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -520,6 +524,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -560,6 +565,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -605,6 +611,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -646,6 +653,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -687,6 +695,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -727,6 +736,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -772,6 +782,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -817,6 +828,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+ }
+@@ -866,6 +878,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udp
+ }
+@@ -1266,6 +1279,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+     ip_version: ipv6
+@@ -1373,6 +1387,7 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udp
+     rrp_mode: passive
+@@ -1431,6 +1446,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udp
+     rrp_mode: passive
+@@ -1489,6 +1505,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udp
+     rrp_mode: passive
+@@ -1547,6 +1564,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udp
+     rrp_mode: passive
+@@ -1614,6 +1632,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udp
+     rrp_mode: active
+@@ -1679,6 +1698,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udp
+     rrp_mode: active
+@@ -1754,6 +1774,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: udpu
+     rrp_mode: passive
+@@ -1842,6 +1863,7 @@ logging {
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: test99
+     transport: udpu
+ }
+@@ -2426,6 +2448,7 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: test99
+     transport: udpu
+     token: 20000
+@@ -2669,6 +2692,7 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
+             ac(data, """\
+ totem {
+     version: 2
++    secauth: off
+     cluster_name: cname
+     transport: unknown
+ }
+diff --git a/pcs/usage.py b/pcs/usage.py
+index c1ab00f..d2262a6 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -576,7 +576,7 @@ Commands:
+             [--ipv6] [--token <timeout>] [--token_coefficient <timeout>]
+             [--join <timeout>] [--consensus <timeout>]
+             [--miss_count_const <count>] [--fail_recv_const <failures>]
+-            [--no-hardened]
++            [--encryption 0|1]
+         Configure corosync and sync configuration out to listed nodes.
+         --local will only perform changes on the local node,
+         --start will also start the cluster on the specified nodes,
+@@ -612,8 +612,8 @@ Commands:
+             without receiving any messages when messages should be received
+             may occur before a new configuration is formed
+             (default 2500 failures)
+-        If --no-hardened is specified, the cluster will be set up in way that all
+-            corosync communication will be encrypted.
++        --encryption 0|1 disables (0) or enables (1) corosync communication
++            encryption (default 0)
+ 
+         Configuring Redundant Ring Protocol (RRP)
+ 
+diff --git a/pcs/utils.py b/pcs/utils.py
+index eec832f..d6aabf4 100644
+--- a/pcs/utils.py
++++ b/pcs/utils.py
+@@ -2879,10 +2879,10 @@ def get_modificators():
+         "device": pcs_options.get("--device", []),
+         "disabled": "--disabled" in pcs_options,
+         "enable": "--enable" in pcs_options,
++        "encryption": pcs_options.get("--encryption", "0"),
+         "force": "--force" in pcs_options,
+         "full": "--full" in pcs_options,
+         "group": pcs_options.get("--group", None),
+-        "hardened": "--no-hardened" not in pcs_options,
+         "monitor": "--monitor" in pcs_options,
+         "name": pcs_options.get("--name", None),
+         "no-default-ops": "--no-default-ops" in pcs_options,
+diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
+index 4d1964d..1026a36 100644
+--- a/pcsd/pcsd.rb
++++ b/pcsd/pcsd.rb
+@@ -569,7 +569,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
+         :clustername => @cluster_name,
+         :nodes => @nodes_rrp.join(';'),
+         :options => options.to_json,
+-        :no_hardened => params[:no_hardened],
++        :encryption => params[:encryption],
+       },
+       true,
+       nil,
+diff --git a/pcsd/remote.rb b/pcsd/remote.rb
+index e37abb7..af74790 100644
+--- a/pcsd/remote.rb
++++ b/pcsd/remote.rb
+@@ -964,8 +964,8 @@ def setup_cluster(params, request, auth_user)
+   end
+   nodes_options = nodes + options
+   nodes_options += options_udp if transport_udp
+-  if params[:no_hardened] == "1"
+-      nodes_options << "--no-hardened"
++  if ['0', '1'].include?(params[:encryption])
++      nodes_options << "--encryption=#{params[:encryption]}"
+   end
+   stdout, stderr, retval = run_cmd(
+     auth_user, PCS, "cluster", "setup", "--enable", "--start", "--async",
+diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
+index a055449..2b12aaa 100644
+--- a/pcsd/views/manage.erb
++++ b/pcsd/views/manage.erb
+@@ -222,7 +222,7 @@
+       <table>
+ 	<% transport_desc = "\
+ Enables either udpu (unicast) or udp (multicast) cluster communication (default: udpu)"%>
+-	<% hardened_desc = "\
++	<% encryption_desc = "\
+ Create cluster with encrypted corosync communication. This option may not work \
+ with pcs version lower than 0.9.159." %>
+ 	<% wait_for_all_desc = "\
+@@ -348,10 +348,13 @@ Specify ring 1 address for each node if you want to use RRP." %>
+             </select>
+           </td>
+         </tr>
+-        <tr title="<%= h(hardened_desc) %>"><td align=right>Hardened:</td>
++        <tr title="<%= h(encryption_desc) %>"><td align=right>Encryption:</td>
+           <td>
+-            <label><input type="radio" name="no_hardened" value="0" checked="checked">Yes</label>
+-            <label><input type="radio" name="no_hardened" value="1">No</label>
++            <select name="encryption">
++              <option selected="selected">(Default)</option>
++              <option value="1">On</option>
++              <option value="0">Off</option>
++            </select>
+           </td>
+         </tr>
+ 	<tr title="<%= h(wait_for_all_desc) %>"><td align=right>Wait for All:</td><td><input type=checkbox name="config-wait_for_all"></td></tr>
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1176018-01-remote-guest-nodes-crashes-fixed.patch b/SOURCES/bz1176018-01-remote-guest-nodes-crashes-fixed.patch
new file mode 100644
index 0000000..ff95495
--- /dev/null
+++ b/SOURCES/bz1176018-01-remote-guest-nodes-crashes-fixed.patch
@@ -0,0 +1,104 @@
+From e13624ef5b2171516979827dcbe7ff03eb8247e5 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Wed, 31 May 2017 07:39:23 +0200
+Subject: [PATCH] squash 1176018 remote guest nodes crashes fixed
+
+c7a24e6 fix adding a node to a stopped cluster
+
+88ad6e6 fix 'pcs cluster restore' command for pcmk authkey
+
+e4b768c fix crash of 'pcs cluster destroy --all'
+
+6b15785 fix crash of 'pcs cluster setup --force'
+---
+ pcs/cluster.py | 33 +++++++++++++++++++++++++--------
+ pcs/config.py  |  4 ++--
+ 2 files changed, 27 insertions(+), 10 deletions(-)
+
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index d64194d..b47db4a 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -425,9 +425,9 @@ def cluster_setup(argv):
+     else:
+         # verify and ensure no cluster is set up on the nodes
+         # checks that nodes are authenticated as well
++        lib_env = utils.get_lib_env()
+         if "--force" not in utils.pcs_options:
+             all_nodes_available = True
+-            lib_env = utils.get_lib_env()
+             for node in primary_addr_list:
+                 available, message = utils.canAddNodeToCluster(
+                     lib_env.node_communicator(),
+@@ -1757,9 +1757,12 @@ def node_add(lib_env, node0, node1, modifiers):
+                 NodeAddressesList([node_addr]),
+             )
+ 
++        # do not send pcmk authkey to guest and remote nodes, they either have
++        # it or are not working anyway
++        # if the cluster is stopped, we cannot get the cib anyway
+         _share_authkey(
+             lib_env,
+-            get_nodes(lib_env.get_corosync_conf(), lib_env.get_cib()),
++            get_nodes(lib_env.get_corosync_conf()),
+             node_addr,
+             allow_incomplete_distribution=modifiers["skip_offline_nodes"]
+         )
+@@ -2112,15 +2115,29 @@ def cluster_reload(argv):
+ # Code taken from cluster-clean script in pacemaker
+ def cluster_destroy(argv):
+     if "--all" in utils.pcs_options:
++        # destroy remote and guest nodes
++        cib = None
+         lib_env = utils.get_lib_env()
+-        all_remote_nodes = get_nodes(tree=lib_env.get_cib())
+-        if len(all_remote_nodes) > 0:
+-            _destroy_pcmk_remote_env(
+-                lib_env,
+-                all_remote_nodes,
+-                allow_fails=True
++        try:
++            cib = lib_env.get_cib()
++        except LibraryError as e:
++            warn(
++                "Unable to load CIB to get guest and remote nodes from it, "
++                "those nodes will not be deconfigured."
+             )
++        if cib is not None:
++            try:
++                all_remote_nodes = get_nodes(tree=cib)
++                if len(all_remote_nodes) > 0:
++                    _destroy_pcmk_remote_env(
++                        lib_env,
++                        all_remote_nodes,
++                        allow_fails=True
++                    )
++            except LibraryError as e:
++                utils.process_library_reports(e.args)
+ 
++        # destroy full-stack nodes
+         destroy_cluster(utils.getNodesFromCorosyncConf())
+     else:
+         print("Shutting down pacemaker/corosync services...")
+diff --git a/pcs/config.py b/pcs/config.py
+index 94191e1..5526eb5 100644
+--- a/pcs/config.py
++++ b/pcs/config.py
+@@ -446,12 +446,12 @@ def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
+         "uname": settings.pacemaker_uname,
+         "gname": settings.pacemaker_gname,
+     }
+-    pcmk_authkey_attrs = dict(cib_attrs)
+-    pcmk_authkey_attrs["mode"] = 0o440
+     if with_uid_gid:
+         cib_attrs["uid"] = _get_uid(cib_attrs["uname"])
+         cib_attrs["gid"] = _get_gid(cib_attrs["gname"])
+ 
++    pcmk_authkey_attrs = dict(cib_attrs)
++    pcmk_authkey_attrs["mode"] = 0o440
+     file_list = {
+         "cib.xml": {
+             "path": os.path.join(settings.cib_dir, "cib.xml"),
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1176018-02-pcs-pcsd-should-be-able-to-config.patch b/SOURCES/bz1176018-02-pcs-pcsd-should-be-able-to-config.patch
new file mode 100644
index 0000000..4464f97
--- /dev/null
+++ b/SOURCES/bz1176018-02-pcs-pcsd-should-be-able-to-config.patch
@@ -0,0 +1,322 @@
+From b8b59f772b2bbdb9728b32c674e69df851f82397 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Tue, 30 May 2017 16:56:50 +0200
+Subject: [PATCH] squash bz1176018 pcs/pcsd should be able to config
+
+43aeca1 fix --skip-offline without effect problem
+
+38de786 clean remote/guest node before pushing cib
+---
+ pcs/cli/cluster/command.py           | 15 +++++----
+ pcs/lib/commands/cluster.py          | 65 ++++++++++++++++++++----------------
+ pcs/lib/nodes_task.py                | 13 ++++++--
+ pcs/lib/test/test_nodes_task.py      |  4 ---
+ pcs/test/test_cluster_pcmk_remote.py | 16 +++++----
+ 5 files changed, 66 insertions(+), 47 deletions(-)
+
+diff --git a/pcs/cli/cluster/command.py b/pcs/cli/cluster/command.py
+index f725326..963bd8c 100644
+--- a/pcs/cli/cluster/command.py
++++ b/pcs/cli/cluster/command.py
+@@ -35,6 +35,7 @@ def node_add_remote(lib, arg_list, modifiers):
+ 
+     parts = parse_resource_create_args(rest_args)
+     force = modifiers["force"]
++    skip_offline = modifiers["skip_offline_nodes"]
+ 
+     lib.cluster.node_add_remote(
+         node_host,
+@@ -42,8 +43,8 @@ def node_add_remote(lib, arg_list, modifiers):
+         parts["op"],
+         parts["meta"],
+         parts["options"],
+-        allow_incomplete_distribution=force,
+-        allow_pacemaker_remote_service_fail=force,
++        allow_incomplete_distribution=skip_offline,
++        allow_pacemaker_remote_service_fail=skip_offline,
+         allow_invalid_operation=force,
+         allow_invalid_instance_attributes=force,
+         use_default_operations=not modifiers["no-default-ops"],
+@@ -58,7 +59,7 @@ def create_node_remove_remote(remove_resource):
+             arg_list[0],
+             remove_resource,
+             allow_remove_multiple_nodes=modifiers["force"],
+-            allow_pacemaker_remote_service_fail=modifiers["force"],
++            allow_pacemaker_remote_service_fail=modifiers["skip_offline_nodes"],
+         )
+     return node_remove_remote
+ 
+@@ -71,14 +72,14 @@ def node_add_guest(lib, arg_list, modifiers):
+     resource_id = arg_list[1]
+     meta_options = prepare_options(arg_list[2:])
+ 
+-    force = modifiers["force"]
++    skip_offline = modifiers["skip_offline_nodes"]
+ 
+     lib.cluster.node_add_guest(
+         node_name,
+         resource_id,
+         meta_options,
+-        allow_incomplete_distribution=force,
+-        allow_pacemaker_remote_service_fail=force,
++        allow_incomplete_distribution=skip_offline,
++        allow_pacemaker_remote_service_fail=skip_offline,
+         wait=modifiers["wait"],
+     )
+ 
+@@ -89,7 +90,7 @@ def node_remove_guest(lib, arg_list, modifiers):
+     lib.cluster.node_remove_guest(
+         arg_list[0],
+         allow_remove_multiple_nodes=modifiers["force"],
+-        allow_pacemaker_remote_service_fail=modifiers["force"],
++        allow_pacemaker_remote_service_fail=modifiers["skip_offline_nodes"],
+         wait=modifiers["wait"],
+     )
+ 
+diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
+index 0bafef5..fe883f3 100644
+--- a/pcs/lib/commands/cluster.py
++++ b/pcs/lib/commands/cluster.py
+@@ -21,13 +21,16 @@ from pcs.lib.errors import LibraryError
+ from pcs.lib.pacemaker import state
+ from pcs.lib.pacemaker.live import remove_node
+ 
+-def _ensure_can_add_node_to_remote_cluster(env, node_addresses):
++def _ensure_can_add_node_to_remote_cluster(
++    env, node_addresses, warn_on_communication_exception=False
++):
+     report_items = []
+     nodes_task.check_can_add_node_to_cluster(
+         env.node_communicator(),
+         node_addresses,
+         report_items,
+-        check_response=nodes_task.availability_checker_remote_node
++        check_response=nodes_task.availability_checker_remote_node,
++        warn_on_communication_exception=warn_on_communication_exception,
+     )
+     env.report_processor.process_list(report_items)
+ 
+@@ -88,7 +91,11 @@ def _prepare_pacemaker_remote_environment(
+         return
+ 
+     candidate_node = NodeAddresses(node_host)
+-    _ensure_can_add_node_to_remote_cluster(env, candidate_node)
++    _ensure_can_add_node_to_remote_cluster(
++        env,
++        candidate_node,
++        allow_incomplete_distribution
++    )
+     _share_authkey(
+         env,
+         current_nodes,
+@@ -296,17 +303,13 @@ def _find_resources_to_remove(
+ 
+     return resource_element_list
+ 
+-def _remove_pcmk_remote_from_cib(
+-    nodes, resource_element_list, get_host, remove_resource
+-):
++def _get_node_addresses_from_resources(nodes, resource_element_list, get_host):
+     node_addresses_set = set()
+     for resource_element in resource_element_list:
+         for node in nodes:
+             #remote nodes uses ring0 only
+             if get_host(resource_element) == node.ring0:
+                 node_addresses_set.add(node)
+-        remove_resource(resource_element)
+-
+     return sorted(node_addresses_set, key=lambda node: node.ring0)
+ 
+ def _destroy_pcmk_remote_env(env, node_addresses_list, allow_fails):
+@@ -382,28 +385,31 @@ def node_remove_remote(
+         allow_remove_multiple_nodes,
+         remote_node.find_node_resources,
+     )
+-    node_addresses_list = _remove_pcmk_remote_from_cib(
++
++    node_addresses_list = _get_node_addresses_from_resources(
+         get_nodes_remote(cib),
+         resource_element_list,
+         remote_node.get_host,
+-        lambda resource_element: remove_resource(
+-            resource_element.attrib["id"],
+-            is_remove_remote_context=True,
+-        )
+     )
++
+     if not env.is_corosync_conf_live:
+         env.report_processor.process_list(
+             _report_skip_live_parts_in_remove(node_addresses_list)
+         )
+-        return
++    else:
++        _destroy_pcmk_remote_env(
++            env,
++            node_addresses_list,
++            allow_pacemaker_remote_service_fail
++        )
+ 
+     #remove node from pcmk caches is currently integrated in remove_resource
+     #function
+-    _destroy_pcmk_remote_env(
+-        env,
+-        node_addresses_list,
+-        allow_pacemaker_remote_service_fail
+-    )
++    for resource_element in resource_element_list:
++        remove_resource(
++            resource_element.attrib["id"],
++            is_remove_remote_context=True,
++        )
+ 
+ def node_remove_guest(
+     env, node_identifier,
+@@ -435,29 +441,32 @@ def node_remove_guest(
+         guest_node.find_node_resources,
+     )
+ 
+-    node_addresses_list =  _remove_pcmk_remote_from_cib(
++    node_addresses_list = _get_node_addresses_from_resources(
+         get_nodes_guest(cib),
+         resource_element_list,
+         guest_node.get_host,
+-        guest_node.unset_guest,
+     )
+-    env.push_cib(cib, wait)
+ 
+     if not env.is_corosync_conf_live:
+         env.report_processor.process_list(
+             _report_skip_live_parts_in_remove(node_addresses_list)
+         )
+-        return
++    else:
++        _destroy_pcmk_remote_env(
++            env,
++            node_addresses_list,
++            allow_pacemaker_remote_service_fail
++        )
++
++    for resource_element in resource_element_list:
++        guest_node.unset_guest(resource_element)
++
++    env.push_cib(cib, wait)
+ 
+     #remove node from pcmk caches
+     for node_addresses in node_addresses_list:
+         remove_node(env.cmd_runner(), node_addresses.name)
+ 
+-    _destroy_pcmk_remote_env(
+-        env,
+-        node_addresses_list,
+-        allow_pacemaker_remote_service_fail
+-    )
+ 
+ def node_clear(env, node_name, allow_clear_cluster_node=False):
+     """
+diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
+index 703609b..6086c4b 100644
+--- a/pcs/lib/nodes_task.py
++++ b/pcs/lib/nodes_task.py
+@@ -277,7 +277,8 @@ def availability_checker_remote_node(
+ 
+ def check_can_add_node_to_cluster(
+     node_communicator, node, report_items,
+-    check_response=availability_checker_node
++    check_response=availability_checker_node,
++    warn_on_communication_exception=False,
+ ):
+     """
+     Analyze result of node_available check if it is possible use the node as
+@@ -294,13 +295,21 @@ def check_can_add_node_to_cluster(
+         node_communicator,
+         node,
+         "remote/node_available",
+-        safe_report_items
++        safe_report_items,
++        warn_on_communication_exception=warn_on_communication_exception
+     )
+     report_items.extend(safe_report_items)
+ 
+     if ReportListAnalyzer(safe_report_items).error_list:
+         return
+ 
++    #If there was a communication error and --skip-offline is in effect, no
++    #exception was raised. If there is no result cannot process it.
++    #Note: the error may be caused by older pcsd daemon not supporting commands
++    #sent by newer client.
++    if not availability_info:
++        return
++
+     is_in_expected_format = (
+         isinstance(availability_info, dict)
+         and
+diff --git a/pcs/lib/test/test_nodes_task.py b/pcs/lib/test/test_nodes_task.py
+index 61ba132..5459337 100644
+--- a/pcs/lib/test/test_nodes_task.py
++++ b/pcs/lib/test/test_nodes_task.py
+@@ -790,10 +790,6 @@ class CheckCanAddNodeToCluster(TestCase):
+     def test_report_no_dict_in_json_response(self):
+         self.assert_result_causes_invalid_format("bad answer")
+ 
+-    def test_report_dict_without_mandatory_key(self):
+-        self.assert_result_causes_invalid_format({})
+-
+-
+ class OnNodeTest(TestCase):
+     def setUp(self):
+         self.reporter = MockLibraryReportProcessor()
+diff --git a/pcs/test/test_cluster_pcmk_remote.py b/pcs/test/test_cluster_pcmk_remote.py
+index 5dc1633..0db4a5c 100644
+--- a/pcs/test/test_cluster_pcmk_remote.py
++++ b/pcs/test/test_cluster_pcmk_remote.py
+@@ -399,11 +399,11 @@ class NodeRemoveRemote(ResourceTest):
+         self.assert_effect(
+             "cluster node remove-remote NODE-HOST",
+             "<resources/>",
+-            outdent(
++            fixture_nolive_remove_report(["NODE-HOST"]) + outdent(
+                 """\
+                 Deleting Resource - NODE-NAME
+                 """
+-            ) + fixture_nolive_remove_report(["NODE-HOST"])
++            )
+         )
+ 
+     def test_success_remove_by_node_name(self):
+@@ -411,11 +411,11 @@ class NodeRemoveRemote(ResourceTest):
+         self.assert_effect(
+             "cluster node remove-remote NODE-NAME",
+             "<resources/>",
+-            outdent(
++            fixture_nolive_remove_report(["NODE-HOST"]) + outdent(
+                 """\
+                 Deleting Resource - NODE-NAME
+                 """
+-            ) + fixture_nolive_remove_report(["NODE-HOST"])
++            )
+         )
+ 
+     def test_refuse_on_duplicit(self):
+@@ -431,13 +431,17 @@ class NodeRemoveRemote(ResourceTest):
+         self.assert_effect(
+             "cluster node remove-remote HOST-A --force",
+             "<resources/>",
++
++            "Warning: multiple resource for 'HOST-A' found: 'HOST-A', 'NODE-NAME'\n"
++            +
++            fixture_nolive_remove_report(["HOST-A", "HOST-B"])
++            +
+             outdent(
+                 """\
+-                Warning: multiple resource for 'HOST-A' found: 'HOST-A', 'NODE-NAME'
+                 Deleting Resource - NODE-NAME
+                 Deleting Resource - HOST-A
+                 """
+-            ) + fixture_nolive_remove_report(["HOST-A", "HOST-B"])
++            )
+         )
+ 
+ class NodeRemoveGuest(ResourceTest):
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1176018-03-don-t-call-remove-guest-node-when-f-is-used.patch b/SOURCES/bz1176018-03-don-t-call-remove-guest-node-when-f-is-used.patch
new file mode 100644
index 0000000..359f147
--- /dev/null
+++ b/SOURCES/bz1176018-03-don-t-call-remove-guest-node-when-f-is-used.patch
@@ -0,0 +1,30 @@
+From 1a2c01a82aa7e791a5d9925ec82792e764e53740 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Wed, 31 May 2017 10:44:22 +0200
+Subject: [PATCH] don't call remove guest node when -f is used
+
+`pcs cluster node remove-guest` now does not call `crm_node --remove`
+when -f is used
+---
+ pcs/lib/commands/cluster.py | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
+index fe883f3..0b04d3d 100644
+--- a/pcs/lib/commands/cluster.py
++++ b/pcs/lib/commands/cluster.py
+@@ -464,8 +464,9 @@ def node_remove_guest(
+     env.push_cib(cib, wait)
+ 
+     #remove node from pcmk caches
+-    for node_addresses in node_addresses_list:
+-        remove_node(env.cmd_runner(), node_addresses.name)
++    if env.is_cib_live:
++        for node_addresses in node_addresses_list:
++            remove_node(env.cmd_runner(), node_addresses.name)
+ 
+ 
+ def node_clear(env, node_name, allow_clear_cluster_node=False):
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch b/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch
deleted file mode 100644
index b3c10e4..0000000
--- a/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch
+++ /dev/null
@@ -1,122 +0,0 @@
-From 2a080e5986331989a3164a35129e576641b2cca5 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 19 Jul 2016 16:42:44 +0200
-Subject: [PATCH 1/2] allow to remove a dead node from a cluster
-
----
- pcs/cluster.py | 41 +++++++++++++++++++++++++++--------------
- 1 file changed, 27 insertions(+), 14 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index baa0f44..7a8615d 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1076,7 +1076,7 @@ def disable_cluster_nodes(nodes):
-     if len(error_list) > 0:
-         utils.err("unable to disable all nodes\n" + "\n".join(error_list))
- 
--def destroy_cluster(argv):
-+def destroy_cluster(argv, keep_going=False):
-     if len(argv) > 0:
-         # stop pacemaker and resources while cluster is still quorate
-         nodes = argv
-@@ -1085,7 +1085,14 @@ def destroy_cluster(argv):
-         # destroy will stop any remaining cluster daemons
-         error_list = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
-         if error_list:
--            utils.err("unable to destroy cluster\n" + "\n".join(error_list))
-+            if keep_going:
-+                print(
-+                    "Warning: unable to destroy cluster\n"
-+                    +
-+                    "\n".join(error_list)
-+                )
-+            else:
-+                utils.err("unable to destroy cluster\n" + "\n".join(error_list))
- 
- def stop_cluster(argv):
-     if len(argv) > 0:
-@@ -1347,19 +1354,25 @@ def cluster_node(argv):
- 
-     node = argv[1]
-     node0, node1 = utils.parse_multiring_node(node)
--
-     if not node0:
-         utils.err("missing ring 0 address of the node")
--    status,output = utils.checkAuthorization(node0)
--    if status == 2:
--        utils.err("pcsd is not running on %s" % node0)
--    elif status == 3:
--        utils.err(
--            "%s is not yet authenticated (try pcs cluster auth %s)"
--            % (node0, node0)
--        )
--    elif status != 0:
--        utils.err(output)
-+
-+    # allow to continue if removing a node with --force
-+    if add_node or "--force" not in utils.pcs_options:
-+        status, output = utils.checkAuthorization(node0)
-+        if status != 0:
-+            if status == 2:
-+                msg = "pcsd is not running on {0}".format(node0)
-+            elif status == 3:
-+                msg = (
-+                    "{node} is not yet authenticated "
-+                    + " (try pcs cluster auth {node})"
-+                ).format(node=node0)
-+            else:
-+                msg = output
-+            if not add_node:
-+                msg += ", use --force to override"
-+            utils.err(msg)
- 
-     if add_node == True:
-         wait = False
-@@ -1540,7 +1553,7 @@ def cluster_node(argv):
- 
-         nodesRemoved = False
-         c_nodes = utils.getNodesFromCorosyncConf()
--        destroy_cluster([node0])
-+        destroy_cluster([node0], keep_going=("--force" in utils.pcs_options))
-         for my_node in c_nodes:
-             if my_node == node0:
-                 continue
--- 
-1.8.3.1
-
-
-From c48716233ace08c16e7e4b66075aebeca9366321 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Wed, 20 Jul 2016 10:01:13 +0200
-Subject: [PATCH 2/2] gui: allow to remove a dead node from a cluster
-
----
- pcsd/remote.rb | 11 +++++++++--
- 1 file changed, 9 insertions(+), 2 deletions(-)
-
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 25fb74d..05a6d03 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -837,8 +837,15 @@ def remote_remove_nodes(params, request, auth_user)
-   stdout, stderr, retval = run_cmd(
-     auth_user, PCS, "cluster", "stop", *stop_params
-   )
--  if retval != 0
--    return [400, stderr.join]
-+  if retval != 0 and not params['force']
-+    # If forced, keep going even if unable to stop all nodes (they may be dead).
-+    # Add info this error is forceable if pcs did not do it (e.g. when unable
-+    # to connect to some nodes).
-+    message = stderr.join
-+    if not message.include?(', use --force to override')
-+      message += ', use --force to override'
-+    end
-+    return [400, message]
-   end
- 
-   node_list.each {|node|
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch b/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch
deleted file mode 100644
index 6f3c99a..0000000
--- a/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From 4fbf6a24492b0ac61be7822208275f1837165ae2 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Fri, 22 Jul 2016 13:37:28 +0200
-Subject: [PATCH] web UI: fix occasional issue with not showing optional
- arguments of resources
-
----
- pcsd/public/js/nodes-ember.js | 12 ++++--------
- pcsd/public/js/pcsd.js        | 17 +++++++----------
- 2 files changed, 11 insertions(+), 18 deletions(-)
-
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index cb62806..2b43559 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -210,20 +210,16 @@ Pcs = Ember.Application.createWithMixins({
-           Ember.run.scheduleOnce('afterRender', Pcs, function () {
-             if (self.get('cur_fence')) {
-               if (fence_change) {
--                if (first_run) {
--                  update_instance_attributes(self.get('cur_fence').get('id'));
--                }
--                tree_view_onclick(self.get('cur_fence').get('id'), true);
-+                tree_view_onclick(self.get('cur_fence').get('id'), first_run);
-               } else {
-                 tree_view_select(self.get('cur_fence').get('id'));
-               }
-             }
-             if (self.get('cur_resource')) {
-               if (resource_change) {
--                if (first_run) {
--                  update_instance_attributes(self.get('cur_resource').get('id'));
--                }
--                tree_view_onclick(self.get('cur_resource').get('id'), true);
-+                tree_view_onclick(
-+                  self.get('cur_resource').get('id'), first_run
-+                );
-               } else {
-                 tree_view_select(self.get('cur_resource').get('id'));
-               }
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index e763482..1ec0f1c 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -2108,29 +2108,26 @@ function update_instance_attributes(resource_id) {
-   }, res_obj.get("stonith"));
- }
- 
--function tree_view_onclick(resource_id, auto) {
--  auto = typeof auto !== 'undefined' ? auto : false;
-+function tree_view_onclick(resource_id, first_run) {
-+  first_run = typeof first_run !== 'undefined' ? first_run : false;
-   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
-   if (!resource_obj) {
-     console.log("Resource " + resource_id + "not found.");
-     return;
-   }
-   if (resource_obj.get('stonith')) {
--    Pcs.resourcesContainer.set('cur_fence', resource_obj);
--    if (!auto) {
-+    if (!first_run) {
-       window.location.hash = "/fencedevices/" + resource_id;
--      update_instance_attributes(resource_id);
-     }
-+    Pcs.resourcesContainer.set('cur_fence', resource_obj);
-   } else {
--    Pcs.resourcesContainer.set('cur_resource', resource_obj);
--
--    if (!auto) {
-+    if (!first_run) {
-       window.location.hash = "/resources/" + resource_id;
--      update_instance_attributes(resource_id);
-     }
-+    Pcs.resourcesContainer.set('cur_resource', resource_obj);
-     auto_show_hide_constraints();
-   }
--
-+  update_instance_attributes(resource_id);
-   tree_view_select(resource_id);
- }
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch b/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch
deleted file mode 100644
index 7f7c9a6..0000000
--- a/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From 590157ae3e595560632ddc25c725b67c42a3f2ab Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Wed, 27 Jul 2016 09:56:55 +0200
-Subject: [PATCH] web UI: don't change current resource in URL if not in
- resources tab
-
----
- pcsd/public/js/nodes-ember.js |  6 ++----
- pcsd/public/js/pcsd.js        | 11 +++++------
- 2 files changed, 7 insertions(+), 10 deletions(-)
-
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index 2b43559..efc0192 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -210,16 +210,14 @@ Pcs = Ember.Application.createWithMixins({
-           Ember.run.scheduleOnce('afterRender', Pcs, function () {
-             if (self.get('cur_fence')) {
-               if (fence_change) {
--                tree_view_onclick(self.get('cur_fence').get('id'), first_run);
-+                tree_view_onclick(self.get('cur_fence').get('id'));
-               } else {
-                 tree_view_select(self.get('cur_fence').get('id'));
-               }
-             }
-             if (self.get('cur_resource')) {
-               if (resource_change) {
--                tree_view_onclick(
--                  self.get('cur_resource').get('id'), first_run
--                );
-+                tree_view_onclick(self.get('cur_resource').get('id'));
-               } else {
-                 tree_view_select(self.get('cur_resource').get('id'));
-               }
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index c8ed340..a646bed 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -1134,8 +1134,8 @@ function hover_out(o) {
- }
- 
- function reload_current_resource() {
--  tree_view_onclick(curResource(), true);
--  tree_view_onclick(curStonith(), true);
-+  tree_view_onclick(curResource());
-+  tree_view_onclick(curStonith());
- }
- 
- function load_row(node_row, ac, cur_elem, containing_elem, also_set, initial_load){
-@@ -2112,20 +2112,19 @@ function update_instance_attributes(resource_id) {
-   }, res_obj.get("stonith"));
- }
- 
--function tree_view_onclick(resource_id, first_run) {
--  first_run = typeof first_run !== 'undefined' ? first_run : false;
-+function tree_view_onclick(resource_id) {
-   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
-   if (!resource_obj) {
-     console.log("Resource " + resource_id + "not found.");
-     return;
-   }
-   if (resource_obj.get('stonith')) {
--    if (!first_run) {
-+    if (window.location.hash.startsWith("#/fencedevices")) {
-       window.location.hash = "/fencedevices/" + resource_id;
-     }
-     Pcs.resourcesContainer.set('cur_fence', resource_obj);
-   } else {
--    if (!first_run) {
-+    if (window.location.hash.startsWith("#/resources")) {
-       window.location.hash = "/resources/" + resource_id;
-     }
-     Pcs.resourcesContainer.set('cur_resource', resource_obj);
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch b/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch
deleted file mode 100644
index 99c01e2..0000000
--- a/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch
+++ /dev/null
@@ -1,401 +0,0 @@
-From 0d440890ade31a2050ac861270a39be5c91d4bbb Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 14 Sep 2016 15:29:06 +0200
-Subject: [PATCH] squash bz1231858 resource/fence agent options form
-
-6007fba70212 web UI: treat resource as managed by default
-
-f1b60c3a2bac WebUI: fix node standby for pcs 0.9.138 and older
-
-73adbedf268e webUI: allow change groups, clone and unclone of resource on clusters running older pcsd
-
-1302b4e62e19 webUI: fix group list when managing cluster running older pcsd
-
-f639c0dded12 webUI: don't show group selector in case cluster doesn't support it
-
-584092ce7d04 webUI: consolidate backward compatibility code
----
- pcsd/cluster_entity.rb        |   2 +-
- pcsd/pcs.rb                   |  20 ++++-
- pcsd/pcsd.rb                  | 169 +++++++++++++++++++++++++++++++++++++-----
- pcsd/public/js/nodes-ember.js |  11 ++-
- pcsd/remote.rb                |   6 +-
- pcsd/views/main.erb           |  20 ++---
- 6 files changed, 194 insertions(+), 34 deletions(-)
-
-diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
-index 4ffcd4b..b8f363a 100644
---- a/pcsd/cluster_entity.rb
-+++ b/pcsd/cluster_entity.rb
-@@ -120,7 +120,7 @@ module ClusterEntity
-       status = ClusterEntity::CRMResourceStatus.new
-       status.id = primitive.id
-       status.resource_agent = primitive.agentname
--      status.managed = false
-+      status.managed = true
-       status.failed = resource[:failed]
-       status.role = nil
-       status.active = resource[:active]
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 137bb3d..e05f3ef 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -1864,7 +1864,7 @@ end
- def status_v1_to_v2(status)
-   new_status = status.select { |k,_|
-     [:cluster_name, :username, :is_cman_with_udpu_transport,
--     :need_ring1_address, :cluster_settings, :constraints, :groups,
-+     :need_ring1_address, :cluster_settings, :constraints,
-      :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby,
-      :pacemaker_offline, :acls, :fence_levels
-     ].include?(k)
-@@ -1885,6 +1885,8 @@ def status_v1_to_v2(status)
-     ].include?(k)
-   }
- 
-+  new_status[:groups] = get_group_list_from_tree_of_resources(resources)
-+
-   new_status[:node].update(
-     {
-       :id => status[:node_id],
-@@ -1901,6 +1903,22 @@ def status_v1_to_v2(status)
-   return new_status
- end
- 
-+def get_group_list_from_tree_of_resources(tree)
-+  group_list = []
-+  tree.each { |resource|
-+    if resource.instance_of?(ClusterEntity::Group)
-+      group_list << resource.id
-+    end
-+    if (
-+      resource.kind_of?(ClusterEntity::MultiInstance) and
-+      resource.member.instance_of?(ClusterEntity::Group)
-+    )
-+      group_list << resource.member.id
-+    end
-+  }
-+  return group_list
-+end
-+
- def allowed_for_local_cluster(auth_user, action)
-   pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
-   return pcs_config.permissions_local.allows?(
-diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
-index 287cf03..dcfd5a0 100644
---- a/pcsd/pcsd.rb
-+++ b/pcsd/pcsd.rb
-@@ -908,7 +908,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
-             'type' => 'boolean',
-             'shortdesc' => 'Should deleted actions be cancelled',
-             'longdesc' => 'Should deleted actions be cancelled',
--            'readable_name' => 'top Orphan Actions',
-+            'readable_name' => 'Stop Orphan Actions',
-             'advanced' => false
-           },
-           'start-failure-is-fatal' => {
-@@ -1215,33 +1215,168 @@ already been added to pcsd.  You may not add two clusters with the same name int
-     return [200, "Node added successfully."]
-   end
- 
-+  def pcs_0_9_142_resource_change_group(auth_user, params)
-+    parameters = {
-+      :resource_id => params[:resource_id],
-+      :resource_group => '',
-+      :_orig_resource_group => '',
-+    }
-+    parameters[:resource_group] = params[:group_id] if params[:group_id]
-+    if params[:old_group_id]
-+      parameters[:_orig_resource_group] = params[:old_group_id]
-+    end
-+    return send_cluster_request_with_token(
-+      auth_user, params[:cluster], 'update_resource', true, parameters
-+    )
-+  end
-+
-+  def pcs_0_9_142_resource_clone(auth_user, params)
-+    parameters = {
-+      :resource_id => params[:resource_id],
-+      :resource_clone => true,
-+      :_orig_resource_clone => 'false',
-+    }
-+    return send_cluster_request_with_token(
-+      auth_user, params[:cluster], 'update_resource', true, parameters
-+    )
-+  end
-+
-+  def pcs_0_9_142_resource_unclone(auth_user, params)
-+    parameters = {
-+      :resource_id => params[:resource_id],
-+      :resource_clone => nil,
-+      :_orig_resource_clone => 'true',
-+    }
-+    return send_cluster_request_with_token(
-+      auth_user, params[:cluster], 'update_resource', true, parameters
-+    )
-+  end
-+
-+  def pcs_0_9_142_resource_master(auth_user, params)
-+    parameters = {
-+      :resource_id => params[:resource_id],
-+      :resource_ms => true,
-+      :_orig_resource_ms => 'false',
-+    }
-+    return send_cluster_request_with_token(
-+      auth_user, params[:cluster], 'update_resource', true, parameters
-+    )
-+  end
-+
-+  # There is a bug in pcs-0.9.138 and older in processing the standby and
-+  # unstandby request. JS of that pcsd always sent nodename in "node"
-+  # parameter, which caused pcsd daemon to run the standby command locally with
-+  # param["node"] as node name. This worked fine if the local cluster was
-+  # managed from JS, as pacemaker simply put the requested node into standby.
-+  # However it didn't work for managing non-local clusters, as the command was
-+  # run on the local cluster everytime. Pcsd daemon would send the request to a
-+  # remote cluster if the param["name"] variable was set, and that never
-+  # happened. That however wouldn't work either, as then the required parameter
-+  # "node" wasn't sent in the request causing an exception on the receiving
-+  # node. This is fixed in commit 053f63ca109d9ef9e7f0416e90aab8e140480f5b
-+  #
-+  # In order to be able to put nodes running pcs-0.9.138 into standby, the
-+  # nodename must be sent in "node" param, and the "name" must not be sent.
-+  def pcs_0_9_138_node_standby(auth_user, params)
-+    translated_params = {
-+      'node' => params[:name],
-+    }
-+    return send_cluster_request_with_token(
-+      auth_user, params[:cluster], 'node_standby', true, translated_params
-+    )
-+  end
-+
-+  def pcs_0_9_138_node_unstandby(auth_user, params)
-+    translated_params = {
-+      'node' => params[:name],
-+    }
-+    return send_cluster_request_with_token(
-+      auth_user, params[:cluster], 'node_unstandby', true, translated_params
-+    )
-+  end
-+
-   post '/managec/:cluster/?*' do
-     auth_user = PCSAuth.sessionToAuthUser(session)
-     raw_data = request.env["rack.input"].read
-     if params[:cluster]
-       request = "/" + params[:splat].join("/")
--      code, out = send_cluster_request_with_token(
--        auth_user, params[:cluster], request, true, params, true, raw_data
--      )
- 
-       # backward compatibility layer BEGIN
--      # This code correctly remove constraints on pcs/pcsd version 0.9.137 and older
--      redirection = {
--          "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
--          "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
-+      translate_for_version = {
-+        '/node_standby' => [
-+          [[0, 9, 138], method(:pcs_0_9_138_node_standby)],
-+        ],
-+        '/node_unstandby' => [
-+          [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)],
-+        ],
-       }
--      if code == 404 and redirection.key?(request)
-+      if translate_for_version.key?(request)
-+        target_pcsd_version = [0, 0, 0]
-+        version_code, version_out = send_cluster_request_with_token(
-+          auth_user, params[:cluster], 'get_sw_versions'
-+        )
-+        if version_code == 200
-+          begin
-+            versions = JSON.parse(version_out)
-+            target_pcsd_version = versions['pcs'] if versions['pcs']
-+          rescue JSON::ParserError
-+          end
-+        end
-+        translate_function = nil
-+        translate_for_version[request].each { |pair|
-+          if (target_pcsd_version <=> pair[0]) != 1 # target <= pair
-+            translate_function = pair[1]
-+            break
-+          end
-+        }
-+      end
-+      # backward compatibility layer END
-+
-+      if translate_function
-+        code, out = translate_function.call(auth_user, params)
-+      else
-         code, out = send_cluster_request_with_token(
--          auth_user,
--          params[:cluster],
--          redirection[request],
--          true,
--          params,
--          false,
--          raw_data
-+          auth_user, params[:cluster], request, true, params, true, raw_data
-         )
-       end
--      # bcl END
-+
-+      # backward compatibility layer BEGIN
-+      if code == 404
-+        case request
-+          # supported since pcs-0.9.143 (tree view of resources)
-+          when '/resource_change_group'
-+            code, out =  pcs_0_9_142_resource_change_group(auth_user, params)
-+          # supported since pcs-0.9.143 (tree view of resources)
-+          when '/resource_clone'
-+            code, out = pcs_0_9_142_resource_clone(auth_user, params)
-+          # supported since pcs-0.9.143 (tree view of resources)
-+          when '/resource_unclone'
-+            code, out = pcs_0_9_142_resource_unclone(auth_user, params)
-+          # supported since pcs-0.9.143 (tree view of resources)
-+          when '/resource_master'
-+            code, out = pcs_0_9_142_resource_master(auth_user, params)
-+          else
-+            redirection = {
-+              # constraints removal for pcs-0.9.137 and older
-+              "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
-+              # constraints removal for pcs-0.9.137 and older
-+              "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
-+            }
-+            if redirection.key?(request)
-+              code, out = send_cluster_request_with_token(
-+                auth_user,
-+                params[:cluster],
-+                redirection[request],
-+                true,
-+                params,
-+                false,
-+                raw_data
-+              )
-+            end
-+        end
-+      end
-+      # backward compatibility layer END
-+
-       return code, out
-     end
-   end
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index 19caf14..6ef49e2 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -922,6 +922,15 @@ Pcs.ResourceObj = Ember.Object.extend({
-         return "";
-     }
-   }.property("status_val"),
-+  show_group_selector: function() {
-+    var parent = this.get("parent");
-+    return !(
-+      parent &&
-+      parent.is_group &&
-+      parent.get("parent") &&
-+      Pcs.resourcesContainer.get("is_version_1")
-+    );
-+  }.property(),
- 
-   location_constraints: [],
-   ordering_constraints: [],
-@@ -1012,7 +1021,7 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
-   is_unmanaged: function() {
-     var instance_status_list = this.get("instance_status");
-     if (!instance_status_list) {
--      return false;
-+      return true;
-     }
-     var is_managed = true;
-     $.each(instance_status_list, function(_, instance_status) {
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 7dc7951..97e63f1 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -334,9 +334,8 @@ end
- def node_standby(params, request, auth_user)
-   if params[:name]
-     code, response = send_request_with_token(
--      auth_user, params[:name], 'node_standby', true, {"node"=>params[:name]}
-+      auth_user, params[:name], 'node_standby', true
-     )
--    # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
-   else
-     if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
-       return 403, 'Permission denied'
-@@ -350,9 +349,8 @@ end
- def node_unstandby(params, request, auth_user)
-   if params[:name]
-     code, response = send_request_with_token(
--      auth_user, params[:name], 'node_unstandby', true, {"node"=>params[:name]}
-+      auth_user, params[:name], 'node_unstandby', true
-     )
--    # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
-   else
-     if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
-       return 403, 'Permission denied'
-diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
-index 8de1c60..a138f68 100644
---- a/pcsd/views/main.erb
-+++ b/pcsd/views/main.erb
-@@ -246,7 +246,6 @@
-             <td class="bold" nowrap>Current Location:</td>
-             <td id="cur_res_loc" class="reg">{{resource.nodes_running_on_string}}</td>
-           </tr>
--          {{#unless old_pcsd}}
-           {{#unless resource.parent}}
-             <tr>
-               <td class="bold" nowrap>Clone:</td>
-@@ -268,6 +267,7 @@
-             </tr>
-           {{else}}
-             {{#if resource.parent.is_group}}
-+            {{#if resource.show_group_selector}}
-             <tr>
-               <td class="bold" nowrap>Group:</td>
-               <td id="cur_res_loc" class="reg">
-@@ -275,11 +275,10 @@
-               </td>
-             </tr>
-             {{/if}}
--          {{/unless}}
-+            {{/if}}
-           {{/unless}}
-         {{/if}}
-         {{/unless}}
--        {{#unless old_pcsd}}
-         {{#if resource.is_group}}
-         {{#unless resource.parent}}
-           <tr>
-@@ -294,12 +293,14 @@
-               <input type="button" onclick="resource_master(curResource());" value="Create master/slave">
-             </td>
-           </tr>
--          <tr>
--            <td class="bold" nowrap>Group:</td>
--            <td id="cur_res_loc" class="reg">
--              <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup">
--            </td>
--          </tr>
-+          {{#unless old_pcsd}}
-+            <tr>
-+              <td class="bold" nowrap>Group:</td>
-+              <td id="cur_res_loc" class="reg">
-+                <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup">
-+              </td>
-+            </tr>
-+          {{/unless}}
-         {{/unless}}
-         {{/if}}
-         {{#if resource.is_multi_instance}}
-@@ -310,7 +311,6 @@
-             </td>
-           </tr>
-         {{/if}}
--        {{/unless}}
-       </table>
-       {{#unless resource.stonith}}
-         {{location_constraints-table constraints=resource.location_constraints}}
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch b/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch
deleted file mode 100644
index 4eedb64..0000000
--- a/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From d1a31c8b887fc668eff8ef582124a84524a5b760 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Mon, 22 Aug 2016 15:52:08 +0200
-Subject: [PATCH] fix error message in node maintenance/unmaintenance commands
-
----
- pcs/node.py           | 23 ++++++++++++++---------
- pcs/test/test_node.py | 10 ++++++++--
- 2 files changed, 22 insertions(+), 11 deletions(-)
-
-diff --git a/pcs/node.py b/pcs/node.py
-index be2fb13..ed77d5d 100644
---- a/pcs/node.py
-+++ b/pcs/node.py
-@@ -77,8 +77,8 @@ def node_maintenance(argv, on=True):
-         for node in argv:
-             if node not in cluster_nodes:
-                 utils.err(
--                    "Node '%s' does not appear to exist in configuration" %
--                    argv[0],
-+                    "Node '{0}' does not appear to exist in "
-+                    "configuration".format(node),
-                     False
-                 )
-                 failed_count += 1
-@@ -87,25 +87,30 @@ def node_maintenance(argv, on=True):
-     else:
-         nodes.append("")
- 
-+    if failed_count > 0:
-+        sys.exit(1)
-+
-     for node in nodes:
--        node = ["-N", node] if node else []
-+        node_attr = ["-N", node] if node else []
-         output, retval = utils.run(
-             ["crm_attribute", "-t", "nodes", "-n", "maintenance"] + action +
--            node
-+            node_attr
-         )
-         if retval != 0:
--            node_name = ("node '%s'" % node) if argv else "current node"
-+            node_name = ("node '{0}'".format(node)) if argv else "current node"
-             failed_count += 1
-             if on:
-                 utils.err(
--                    "Unable to put %s to maintenance mode.\n%s" %
--                    (node_name, output),
-+                    "Unable to put {0} to maintenance mode: {1}".format(
-+                        node_name, output
-+                    ),
-                     False
-                 )
-             else:
-                 utils.err(
--                    "Unable to remove %s from maintenance mode.\n%s" %
--                    (node_name, output),
-+                    "Unable to remove {0} from maintenance mode: {1}".format(
-+                        node_name, output
-+                    ),
-                     False
-                 )
-     if failed_count > 0:
-diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
-index 6f03112..785c711 100644
---- a/pcs/test/test_node.py
-+++ b/pcs/test/test_node.py
-@@ -88,11 +88,14 @@ Node Attributes:
- """
-         ac(expected_out, output)
- 
--        output, returnVal = pcs(temp_cib, "node maintenance nonexistant-node")
-+        output, returnVal = pcs(
-+            temp_cib, "node maintenance nonexistant-node and-another"
-+        )
-         self.assertEqual(returnVal, 1)
-         self.assertEqual(
-             output,
-             "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
-+            "Error: Node 'and-another' does not appear to exist in configuration\n"
-         )
-         output, _ = pcs(temp_cib, "property")
-         expected_out = """\
-@@ -134,11 +137,14 @@ Cluster Properties:
- """
-         ac(expected_out, output)
- 
--        output, returnVal = pcs(temp_cib, "node unmaintenance nonexistant-node")
-+        output, returnVal = pcs(
-+            temp_cib, "node unmaintenance nonexistant-node and-another"
-+        )
-         self.assertEqual(returnVal, 1)
-         self.assertEqual(
-             output,
-             "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
-+            "Error: Node 'and-another' does not appear to exist in configuration\n"
-         )
-         output, _ = pcs(temp_cib, "property")
-         expected_out = """\
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch b/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch
deleted file mode 100644
index 78764a3..0000000
--- a/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch
+++ /dev/null
@@ -1,320 +0,0 @@
-From cf1c95354a9db8b81712d7b98d0cc55e777e0516 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Thu, 4 Aug 2016 00:59:11 +0200
-Subject: [PATCH] web UI: add support for unmanaged resources
-
----
- pcsd/cluster_entity.rb        | 13 ++++++++--
- pcsd/pcs.rb                   |  1 +
- pcsd/public/js/nodes-ember.js | 22 +++++++++++++----
- pcsd/public/js/pcsd.js        | 52 ++++++++++++++++++++++++++++++++++++++++
- pcsd/remote.rb                | 55 +++++++++++++++++++++++++++++++++++++++----
- pcsd/views/main.erb           | 26 ++++++++++++++++++++
- 6 files changed, 158 insertions(+), 11 deletions(-)
-
-diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
-index fa56fe2..7216626 100644
---- a/pcsd/cluster_entity.rb
-+++ b/pcsd/cluster_entity.rb
-@@ -332,7 +332,11 @@ module ClusterEntity
-       :unknown => {
-         :val => 6,
-         :str => 'unknown'
--      }
-+      },
-+      :unmanaged => {
-+        :val => 7,
-+        :str => 'unmanaged'
-+      },
-     }
- 
-     def initialize(status=:unknown)
-@@ -532,8 +536,11 @@ module ClusterEntity
-     def get_status
-       running = 0
-       failed = 0
-+      unmanaged = 0
-       @crm_status.each do |s|
--        if s.active
-+        if !s.managed
-+          unmanaged += 1
-+        elsif s.active
-           running += 1
-         elsif s.failed
-           failed += 1
-@@ -542,6 +549,8 @@ module ClusterEntity
- 
-       if disabled?
-         status = ClusterEntity::ResourceStatus.new(:disabled)
-+      elsif unmanaged >0
-+        status = ClusterEntity::ResourceStatus.new(:unmanaged)
-       elsif running > 0
-         status = ClusterEntity::ResourceStatus.new(:running)
-       elsif failed > 0 or @error_list.length > 0
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 1eb9e9e..553a20c 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -1703,6 +1703,7 @@ def get_node_status(auth_user, cib_dom)
-         'sbd',
-         'ticket_constraints',
-         'moving_resource_in_group',
-+        'unmanaged_resource',
-       ]
-   }
- 
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index 3d4fe79..c51a341 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -57,6 +57,9 @@ Pcs = Ember.Application.createWithMixins({
-       this.get("available_features").indexOf("moving_resource_in_group") != -1
-     );
-   }.property("available_features"),
-+  is_supported_unmanaged_resource: function() {
-+    return (this.get("available_features").indexOf("unmanaged_resource") != -1);
-+  }.property("available_features"),
-   is_sbd_running: false,
-   is_sbd_enabled: false,
-   is_sbd_enabled_or_running: function() {
-@@ -869,9 +872,17 @@ Pcs.ResourceObj = Ember.Object.extend({
-     return '<span style="' + this.get('status_style') + '">' + this.get('status') + '</span>';
-   }.property("status_style", "disabled"),
-   status_class: function() {
--    var show = ((Pcs.clusterController.get("show_all_resources"))? "" : "hidden ");
--    return ((this.get("status_val") == get_status_value("ok") || this.status == "disabled") ? show + "default-hidden" : "");
--  }.property("status_val"),
-+    if (
-+      this.get("status_val") == get_status_value("ok") ||
-+      ["disabled", "unmanaged"].indexOf(this.get("status")) != -1
-+    ) {
-+      return (
-+        Pcs.clusterController.get("show_all_resources") ? "" : "hidden "
-+        ) + "default-hidden";
-+    } else {
-+      return "";
-+    }
-+  }.property("status_val", "status"),
-   status_class_fence: function() {
-     var show = ((Pcs.clusterController.get("show_all_fence"))? "" : "hidden ");
-     return ((this.get("status_val") == get_status_value("ok")) ? show + "default-hidden" : "");
-@@ -1681,8 +1692,9 @@ Pcs.Cluster = Ember.Object.extend({
-     var num = 0;
-     $.each(this.get(type), function(key, value) {
-       if (value.get("status_val") < get_status_value("ok") &&
--        value.status != "disabled" && value.status != "standby" &&
--        value.status != "maintenance"
-+        [
-+          "unmanaged", "disabled", "standby", "maintenance"
-+        ].indexOf(value.status) == -1
-       ) {
-         num++;
-       }
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index 82187ef..56219d4 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -1333,6 +1333,9 @@ function remove_resource(ids, force) {
-           message += "\n\n" + xhr.responseText.replace(
-             "--force", "'Enforce removal'"
-           );
-+          alert(message);
-+          $("#verify_remove_submit_btn").button("option", "disabled", false);
-+          return;
-         }
-       }
-       alert(message);
-@@ -1957,6 +1960,7 @@ function get_status_value(status) {
-     maintenance: 2,
-     "partially running": 2,
-     disabled: 3,
-+    unmanaged: 3,
-     unknown: 4,
-     ok: 5,
-     running: 5,
-@@ -2987,3 +2991,51 @@ function sbd_status_dialog() {
-     buttons: buttonsOpts
-   });
- }
-+
-+function unmanage_resource(resource_id) {
-+  if (!resource_id) {
-+    return;
-+  }
-+  fade_in_out("#resource_unmanage_link");
-+  ajax_wrapper({
-+    type: 'POST',
-+    url: get_cluster_remote_url() + "unmanage_resource",
-+    data: {
-+      resource_list_json: JSON.stringify([resource_id]),
-+    },
-+    timeout: pcs_timeout,
-+    complete: function() {
-+      Pcs.update();
-+    },
-+    error: function (xhr, status, error) {
-+      alert(
-+        `Unable to unmanage '${resource_id}': ` +
-+        ajax_simple_error(xhr, status, error)
-+      );
-+    },
-+  });
-+}
-+
-+function manage_resource(resource_id) {
-+  if (!resource_id) {
-+    return;
-+  }
-+  fade_in_out("#resource_manage_link");
-+  ajax_wrapper({
-+    type: 'POST',
-+    url: get_cluster_remote_url() + "manage_resource",
-+    data: {
-+      resource_list_json: JSON.stringify([resource_id]),
-+    },
-+    timeout: pcs_timeout,
-+    complete: function() {
-+      Pcs.update();
-+    },
-+    error: function (xhr, status, error) {
-+      alert(
-+        `Unable to manage '${resource_id}': ` +
-+        ajax_simple_error(xhr, status, error)
-+      );
-+    }
-+  });
-+}
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 4844adf..ebf425c 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -116,7 +116,9 @@ def remote(params, request, auth_user)
-       :set_resource_utilization => method(:set_resource_utilization),
-       :set_node_utilization => method(:set_node_utilization),
-       :get_resource_agent_metadata => method(:get_resource_agent_metadata),
--      :get_fence_agent_metadata => method(:get_fence_agent_metadata)
-+      :get_fence_agent_metadata => method(:get_fence_agent_metadata),
-+      :manage_resource => method(:manage_resource),
-+      :unmanage_resource => method(:unmanage_resource),
-   }
- 
-   command = params[:command].to_sym
-@@ -1575,10 +1577,10 @@ def remove_resource(params, request, auth_user)
-       end
-       cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable']
-       resource_list.each { |resource|
--        _, err, retval = run_cmd(user, *(cmd + [resource]))
-+        out, err, retval = run_cmd(user, *(cmd + [resource]))
-         if retval != 0
-           unless (
--            err.join('').index('unable to find a resource') != -1 and
-+            (out + err).join('').include?(' does not exist.') and
-             no_error_if_not_exists
-           )
-             errors += "Unable to stop resource '#{resource}': #{err.join('')}"
-@@ -1613,7 +1615,10 @@ def remove_resource(params, request, auth_user)
-     end
-     out, err, retval = run_cmd(auth_user, *cmd)
-     if retval != 0
--      unless out.index(' does not exist.') != -1 and no_error_if_not_exists
-+      unless (
-+        (out + err).join('').include?(' does not exist.') and
-+        no_error_if_not_exists
-+      )
-         errors += err.join(' ').strip + "\n"
-       end
-     end
-@@ -2630,3 +2635,45 @@ def qdevice_client_start(param, request, auth_user)
-     return [400, msg]
-   end
- end
-+
-+def manage_resource(param, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  unless param[:resource_list_json]
-+    return [400, "Required parameter 'resource_list_json' is missing."]
-+  end
-+  begin
-+    resource_list = JSON.parse(param[:resource_list_json])
-+    _, err, retval = run_cmd(
-+      auth_user, PCS, 'resource', 'manage', *resource_list
-+    )
-+    if retval != 0
-+      return [400, err.join('')]
-+    end
-+    return [200, '']
-+  rescue JSON::ParserError
-+    return [400, 'Invalid input data format']
-+  end
-+end
-+
-+def unmanage_resource(param, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  unless param[:resource_list_json]
-+    return [400, "Required parameter 'resource_list_json' is missing."]
-+  end
-+  begin
-+    resource_list = JSON.parse(param[:resource_list_json])
-+    _, err, retval = run_cmd(
-+      auth_user, PCS, 'resource', 'unmanage', *resource_list
-+    )
-+    if retval != 0
-+      return [400, err.join('')]
-+    end
-+    return [200, '']
-+  rescue JSON::ParserError
-+    return [400, 'Invalid input data format']
-+  end
-+end
-diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
-index 1b21f92..64fe560 100644
---- a/pcsd/views/main.erb
-+++ b/pcsd/views/main.erb
-@@ -160,6 +160,7 @@
-       </table>
-     </div>
-     <div id="node_options_buttons">
-+    <div>
-     {{#if resource.stonith}}
-       <div class="xdark sprites" style="float: left"></div>
-       <div id="stonith_delete_link" class="link" onclick="verify_remove_fence_devices(curStonith());">Remove</div>
-@@ -174,7 +175,32 @@
-       <div id="resource_cleanup_link" class="link" onclick="cleanup_resource();">Cleanup</div>
-       <div class="xdark sprites" style="float: left"></div>
-       <div id="resource_delete_link" class="link" onclick="verify_remove_resources(curResource());">Remove</div>
-+      </div>
-+      <div>
-+      {{#if Pcs.is_supported_unmanaged_resource}}
-+        <div>
-+        <div class="checkdark sprites" style="float: left"></div>
-+        <div
-+          id="resource_manage_link"
-+          class="link"
-+          onclick="manage_resource(curResource());"
-+        >
-+          Manage
-+        </div>
-+        </div>
-+        <div>
-+        <div class="cancel sprites" style="float: left"></div>
-+        <div
-+          id="resource_unmanage_link"
-+          class="link"
-+          onclick="unmanage_resource(curResource());"
-+        >
-+          Unmanage
-+        </div>
-+        </div>
-+      {{/if}}
-     {{/if}}
-+    </div>
-       <!--
-       <div class="move sprites" style="float: left"></div>
-       <div id="resource_move_link" class="link">Move</div>
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch b/SOURCES/bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch
deleted file mode 100644
index 08e123f..0000000
--- a/SOURCES/bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch
+++ /dev/null
@@ -1,185 +0,0 @@
-From 563a2c51877b9cf2a5ae419fc6d4eeb680eed04f Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Wed, 24 Aug 2016 10:04:01 +0200
-Subject: [PATCH] web UI: change way of displaying status of unmanaged
- primitive resources
-
----
- pcsd/cluster_entity.rb        | 11 +----------
- pcsd/public/js/nodes-ember.js | 27 ++++++++++++++++++++++-----
- pcsd/public/js/pcsd.js        | 10 ++++++----
- pcsd/views/manage.erb         | 15 ++++++++++-----
- 4 files changed, 39 insertions(+), 24 deletions(-)
-
-diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
-index 7216626..4ffcd4b 100644
---- a/pcsd/cluster_entity.rb
-+++ b/pcsd/cluster_entity.rb
-@@ -333,10 +333,6 @@ module ClusterEntity
-         :val => 6,
-         :str => 'unknown'
-       },
--      :unmanaged => {
--        :val => 7,
--        :str => 'unmanaged'
--      },
-     }
- 
-     def initialize(status=:unknown)
-@@ -536,11 +532,8 @@ module ClusterEntity
-     def get_status
-       running = 0
-       failed = 0
--      unmanaged = 0
-       @crm_status.each do |s|
--        if !s.managed
--          unmanaged += 1
--        elsif s.active
-+        if s.active
-           running += 1
-         elsif s.failed
-           failed += 1
-@@ -549,8 +542,6 @@ module ClusterEntity
- 
-       if disabled?
-         status = ClusterEntity::ResourceStatus.new(:disabled)
--      elsif unmanaged >0
--        status = ClusterEntity::ResourceStatus.new(:unmanaged)
-       elsif running > 0
-         status = ClusterEntity::ResourceStatus.new(:running)
-       elsif failed > 0 or @error_list.length > 0
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index f176c39..c650fe6 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -851,7 +851,9 @@ Pcs.ResourceObj = Ember.Object.extend({
-   }.property("class_type"),
-   res_type: Ember.computed.alias('resource_type'),
-   status_icon: function() {
--    var icon_class = get_status_icon_class(this.get("status_val"));
-+    var icon_class = get_status_icon_class(
-+      this.get("status_val"), this.get("is_unmanaged")
-+    );
-     return "<div style=\"float:left;margin-right:6px;height:16px;\" class=\"" + icon_class + " sprites\"></div>";
-   }.property("status_val"),
-   status_val: function() {
-@@ -867,19 +869,23 @@ Pcs.ResourceObj = Ember.Object.extend({
-     }
-   }.property('status', 'error_list.@each.message', 'warning_list.@each.message'),
-   status_color: function() {
--    return get_status_color(this.get("status_val"));
-+    return get_status_color(this.get("status_val"), this.get("is_unmanaged"));
-   }.property("status_val"),
-   status_style: function() {
--    var color = get_status_color(this.get("status_val"));
-+    var color = get_status_color(
-+      this.get("status_val"), this.get("is_unmanaged")
-+    );
-     return "color: " + color + ((color != "green")? "; font-weight: bold;" : "");
-   }.property("status_val"),
-   show_status: function() {
--    return '<span style="' + this.get('status_style') + '">' + this.get('status') + '</span>';
-+    return '<span style="' + this.get('status_style') + '">'
-+      + this.get('status') + (this.get("is_unmanaged") ? " (unmanaged)" : "")
-+      + '</span>';
-   }.property("status_style", "disabled"),
-   status_class: function() {
-     if (
-       this.get("status_val") == get_status_value("ok") ||
--      ["disabled", "unmanaged"].indexOf(this.get("status")) != -1
-+      this.get("status") == "disabled"
-     ) {
-       return (
-         Pcs.clusterController.get("show_all_resources") ? "" : "hidden "
-@@ -1003,6 +1009,17 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
-   instance_status: [],
-   operations: [],
-   utilization: [],
-+  is_unmanaged: function() {
-+    var instance_status_list = this.get("instance_status");
-+    if (!instance_status_list) {
-+      return false;
-+    }
-+    var is_managed = true;
-+    $.each(instance_status_list, function(_, instance_status) {
-+      is_managed = is_managed && instance_status.get("managed");
-+    });
-+    return !is_managed;
-+  }.property("instance_status.@each.managed"),
-   resource_type: function() {
-     var agent = this.get("agentname");
-     if (agent) {
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index 1060bd3..67a0bdb 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -1977,7 +1977,8 @@ function status_comparator(a,b) {
-   return valA - valB;
- }
- 
--function get_status_icon_class(status_val) {
-+function get_status_icon_class(status_val, is_unmanaged) {
-+  var is_unmanaged = typeof is_unmanaged !== 'undefined' ? is_unmanaged : false;
-   switch (status_val) {
-     case get_status_value("error"):
-       return "error";
-@@ -1985,15 +1986,16 @@ function get_status_icon_class(status_val) {
-     case get_status_value("warning"):
-       return "warning";
-     case get_status_value("ok"):
--      return "check";
-+      return is_unmanaged ? "warning" : "check";
-     default:
-       return "x";
-   }
- }
- 
--function get_status_color(status_val) {
-+function get_status_color(status_val, is_unmanaged) {
-+  var is_unmanaged = typeof is_unmanaged !== 'undefined' ? is_unmanaged : false;
-   if (status_val == get_status_value("ok")) {
--    return "green";
-+    return is_unmanaged? "orange" : "green";
-   }
-   else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown") || status_val == get_status_value('disabled')) {
-     return "orange";
-diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
-index 885b327..39ab41f 100644
---- a/pcsd/views/manage.erb
-+++ b/pcsd/views/manage.erb
-@@ -113,13 +113,18 @@
-               <td>
-                 <table class="datatable">
-                   <tr>
--                    <th style="width: 150px;">RESOURCE</th>
--                    <th style="width: 100px;">STATUS</th>
-+                    <th style="width: 170px;">RESOURCE</th>
-+                    <th style="width: 150px;">STATUS</th>
-                   </tr>
-                   {{#each r in Pcs.clusterController.cur_cluster.resource_list}}
-                   <tr {{bind-attr title=r.tooltip}} {{bind-attr class=r.status_class}}>
-                     <td><a {{bind-attr href=r.url_link}}>{{r.id}}</a></td>
--                    <td {{bind-attr style=r.status_style}}>{{{r.status_icon}}}{{r.status}}</td>
-+                    <td {{bind-attr style=r.status_style}}>
-+                      {{{r.status_icon}}}{{r.status}}
-+                      {{#if r.is_unmanaged}}
-+                        (unmanaged)
-+                      {{/if}}
-+                    </td>
-                   </tr>
-                   {{else}}
-                   <tr>
-@@ -144,8 +149,8 @@
-               <td>
-                 <table class="datatable">
-                   <tr>
--                    <th style="width: 150px;">FENCE-DEVICE</th>
--                    <th style="width: 100px;">STATUS</th>
-+                    <th style="width: 170px;">FENCE-DEVICE</th>
-+                    <th style="width: 150px;">STATUS</th>
-                   </tr>
-                   {{#each f in Pcs.clusterController.cur_cluster.fence_list}}
-                   <tr {{bind-attr title=f.tooltip}} {{bind-attr class=f.status_class_fence}}>
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch b/SOURCES/bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch
deleted file mode 100644
index c583575..0000000
--- a/SOURCES/bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From 8696f5e4f072ac88a3e20b1b376ea8de823f7aa7 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 8 Jul 2016 12:20:59 +0200
-Subject: [PATCH] fix displaying cluster config when cib is provided as a file
-
----
- pcs/config.py | 11 +++++++++--
- pcs/status.py |  8 +++++++-
- pcs/utils.py  | 13 +++++++------
- 3 files changed, 23 insertions(+), 9 deletions(-)
-
-diff --git a/pcs/config.py b/pcs/config.py
-index 4659c5b..3d86b39 100644
---- a/pcs/config.py
-+++ b/pcs/config.py
-@@ -94,7 +94,14 @@ def config_show(argv):
-     status.nodes_status(["config"])
-     print()
-     config_show_cib()
--    cluster.cluster_uidgid([], True)
-+    if (
-+        utils.is_rhel6()
-+        or
-+        (not utils.usefile and "--corosync_conf" not in utils.pcs_options)
-+    ):
-+        # with corosync 1 and cman, uid gid is part of cluster.conf file
-+        # with corosync 2, uid gid is in a separate directory
-+        cluster.cluster_uidgid([], True)
-     if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6():
-         print()
-         print("Quorum:")
-@@ -113,8 +120,8 @@ def config_show_cib():
-     print("Stonith Devices:")
-     resource.resource_show([], True)
-     print("Fencing Levels:")
--    print()
-     stonith.stonith_level_show()
-+    print()
- 
-     lib = utils.get_library_wrapper()
-     constraint.location_show([])
-diff --git a/pcs/status.py b/pcs/status.py
-index 0e5e0e7..e1f367f 100644
---- a/pcs/status.py
-+++ b/pcs/status.py
-@@ -66,7 +66,13 @@ def full_status():
-     if utils.stonithCheck():
-         print("WARNING: no stonith devices and stonith-enabled is not false")
- 
--    if not utils.is_rhel6() and utils.corosyncPacemakerNodeCheck():
-+    if (
-+        not utils.usefile
-+        and
-+        not utils.is_rhel6()
-+        and
-+        utils.corosyncPacemakerNodeCheck()
-+    ):
-         print("WARNING: corosync and pacemaker node names do not match (IPs used in setup?)")
- 
-     print(output)
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 171fbdd..01db081 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1808,12 +1808,13 @@ def stonithCheck():
-         if p.attrib["class"] == "stonith":
-             return False
- 
--    # check if SBD daemon is running
--    try:
--        if is_service_running(cmd_runner(), "sbd"):
--            return False
--    except LibraryError:
--        pass
-+    if not usefile:
-+        # check if SBD daemon is running
-+        try:
-+            if is_service_running(cmd_runner(), "sbd"):
-+                return False
-+        except LibraryError:
-+            pass
- 
-     return True
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1281364-01-gui-add-constraint-colocation-set-support.patch b/SOURCES/bz1281364-01-gui-add-constraint-colocation-set-support.patch
deleted file mode 100644
index cf9130e..0000000
--- a/SOURCES/bz1281364-01-gui-add-constraint-colocation-set-support.patch
+++ /dev/null
@@ -1,190 +0,0 @@
-From aeb87c63c2f37bdc241b2c9add7cf0e9be9d7789 Mon Sep 17 00:00:00 2001
-From: Marek Grac <mgrac@redhat.com>
-Date: Thu, 7 Jul 2016 14:05:14 +0200
-Subject: [PATCH] gui: add constraint colocation set support
-
----
- pcsd/pcs.rb                   | 19 ++++++++++++++++++-
- pcsd/public/js/nodes-ember.js |  9 +++++++++
- pcsd/public/js/pcsd.js        |  1 +
- pcsd/remote.rb                |  5 +++++
- pcsd/views/main.erb           | 44 +++++++++++++++++++++++++++++++++++++++++++
- 5 files changed, 77 insertions(+), 1 deletion(-)
-
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 7c25e10..57082be 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -131,6 +131,19 @@ def add_order_set_constraint(
-   return retval, stderr.join(' ')
- end
- 
-+def add_colocation_set_constraint(
-+  auth_user, resource_set_list, force=false, autocorrect=true
-+)
-+  command = [PCS, "constraint", "colocation"]
-+  resource_set_list.each { |resource_set|
-+    command << "set"
-+    command.concat(resource_set)
-+  }
-+  command << '--force' if force
-+  command << '--autocorrect' if autocorrect
-+  stdout, stderr, retval = run_cmd(auth_user, *command)
-+  return retval, stderr.join(' ')
-+end
- 
- def add_ticket_constraint(
-     auth_user, ticket, resource_id, role, loss_policy,
-@@ -1681,7 +1694,11 @@ def get_node_status(auth_user, cib_dom)
-       :node_attr => node_attrs_to_v2(get_node_attributes(auth_user, cib_dom)),
-       :nodes_utilization => get_nodes_utilization(cib_dom),
-       :known_nodes => [],
--      :available_features => ['sbd', 'ticket_constraints']
-+      :available_features => [
-+        'constraint_colocation_set',
-+        'sbd',
-+        'ticket_constraints',
-+      ]
-   }
- 
-   nodes = get_nodes_status()
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index bf1bb92..cb62806 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -47,6 +47,11 @@ Pcs = Ember.Application.createWithMixins({
-       this.get("available_features").indexOf("ticket_constraints") != -1
-     );
-   }.property("available_features"),
-+  is_supported_constraint_colocation_set: function() {
-+    return (
-+      this.get("available_features").indexOf("constraint_colocation_set") != -1
-+    );
-+  }.property("available_features"),
-   is_sbd_running: false,
-   is_sbd_enabled: false,
-   is_sbd_enabled_or_running: function() {
-@@ -767,6 +772,7 @@ Pcs.ResourceObj = Ember.Object.extend({
-   ordering_constraints: [],
-   ordering_set_constraints: [],
-   colocation_constraints: [],
-+  colocation_set_constraints: [],
- 
-   get_map: function() {
-     var self = this;
-@@ -2381,6 +2387,7 @@ function constraint_resort(constraints){
-       ordering_constraints: {},
-       ordering_set_constraints: {},
-       colocation_constraints: {},
-+      colocation_set_constraints: {},
-     };
-   }
- 
-@@ -2391,6 +2398,7 @@ function constraint_resort(constraints){
- 
-   var colocations = constraint_resort_part(constraints.rsc_colocation, {
-     plain: constraint_colocation_create_resource_keyed_map,
-+    with_sets: constraint_set_create_resource_keyed_map,
-   });
- 
-   var locations = constraint_resort_part(constraints.rsc_location, {
-@@ -2409,5 +2417,6 @@ function constraint_resort(constraints){
-     ticket_constraints: tickets.plain,
-     ticket_set_constraints: tickets.with_sets,
-     colocation_constraints: colocations.plain,
-+    colocation_set_constraints: colocations.with_sets,
-   };
- }
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index 41c481e..6c88888 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -2046,6 +2046,7 @@ function auto_show_hide_constraints() {
-     "ordering_constraints",
-     "ordering_set_constraints",
-     "colocation_constraints",
-+    "colocation_set_constraints",
-     "ticket_constraints",
-     "ticket_set_constraints",
-     "meta_attributes",
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index b1e00fa..75c9465 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -1821,6 +1821,11 @@ def add_constraint_set_remote(params, request, auth_user)
-       auth_user,
-       params["resources"].values, params["force"], !params['disable_autocorrect']
-     )
-+  when "col"
-+    retval, error = add_colocation_set_constraint(
-+      auth_user,
-+      params["resources"].values, params["force"], !params['disable_autocorrect']
-+    )
-   when "ticket"
-     unless params["options"]["ticket"]
-       return [400, "Error adding constraint ticket: option ticket missing"]
-diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
-index 5461515..52c1900 100644
---- a/pcsd/views/main.erb
-+++ b/pcsd/views/main.erb
-@@ -291,6 +291,9 @@
-         {{ordering_constraints-table constraints=resource.ordering_constraints resource_id=resource._id}}
-         {{ordering_set_constraints-table constraints=resource.ordering_set_constraints}}
-         {{colocation_constraints-table constraints=resource.colocation_constraints}}
-+        {{#if Pcs.is_supported_constraint_colocation_set}}
-+          {{colocation_set_constraints-table constraints=resource.colocation_set_constraints}}
-+        {{/if}}
-         {{#if Pcs.is_ticket_constraints_supported}}
-           {{ticket_constraints-table constraints=resource.ticket_constraints resource_id=resource._id}}
-           {{ticket_set_constraints-table constraints=resource.ticket_set_constraints}}
-@@ -696,6 +699,47 @@ Use the 'Add' button to submit the form.">
- 	      </table>
-   </script>
- 
-+  <script type="text/x-handlebars" data-template-name="components/colocation_set_constraints-table">
-+    <table style="clear:left;float:left;">
-+          <tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="colocation_set_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Colocation Set Preferences ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})</td></tr>
-+          <tr><td>
-+            <div id="locationdep">
-+              <table class="datatable">
-+                <tr>
-+                  <th>Preference Name/Set of Resources</th>
-+                  <th style="text-align: center;">Remove</th>
-+                </tr>
-+                {{#each cons in constraints}}
-+                <tr>
-+                  <td>{{cons.id}}</td>
-+                  <td {{bind-attr constraint_id="cons.id"}} style="text-align:center;">
-+                    <a onclick="return remove_constraint_action(this);" href="#" class="remove">X</a>
-+                  </td>
-+                </tr>
-+                {{#each set in cons.sets}}
-+                <tr>
-+                  <td style="padding-left:2em;">Set:{{#each rsc in set.resources}} {{rsc}}{{/each}}</td>
-+                  <td></td>
-+                </tr>
-+                {{/each}}
-+                {{else}}
-+                <tr><td style="color: gray;">NONE</td><td></td></tr>
-+                {{/each}}
-+                <tr id="new_res_col_set" title="Enter the resources you want to be in one set into the 'Set' field separated by space.
-+Use the 'New Set' button to create more sets.
-+Use the 'Add' button to submit the form.">
-+                  <td>Set: <input type="text" name="resource_ids[]"></td>
-+                  <td style="vertical-align: bottom;">
-+                    <button type="button" onclick="new_constraint_set_row('#new_res_col_set');" name="new-row">New Set</button>
-+                    <button type="button" onclick="add_constraint_set('#new_res_col_set', 'col', false);" name="add">Add</button>
-+                  </td>
-+                </tr>
-+              </table>
-+            </div>
-+          </td></tr>
-+        </table>
-+  </script>
-+
-   <script type="text/x-handlebars" data-template-name="components/meta_attributes-table">
-     <table style="clear:left;float:left">
- 		<tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="meta_attributes"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Meta Attributes ({{#if resource.meta_attr.length}}{{resource.meta_attr.length}}{{else}}0{{/if}})</td></tr>
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch b/SOURCES/bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch
deleted file mode 100644
index 8613126..0000000
--- a/SOURCES/bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch
+++ /dev/null
@@ -1,600 +0,0 @@
-From 0a96fde9b1d691268948091442c2f0075e81ab95 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Thu, 28 Jul 2016 15:21:18 +0200
-Subject: [PATCH] web UI: add possibility to change order of resources in group
-
----
- pcsd/pcs.rb                   |   1 +
- pcsd/public/css/style.css     |  10 +++
- pcsd/public/js/nodes-ember.js | 167 ++++++++++++++++++++++++++++++++++++++----
- pcsd/public/js/pcsd.js        | 117 +++++++++++++++++------------
- pcsd/remote.rb                |  47 ++++++++----
- pcsd/views/_dialogs.erb       |  21 ++++++
- pcsd/views/_resource.erb      |   6 --
- pcsd/views/main.erb           |  51 +++++++++++--
- 8 files changed, 334 insertions(+), 86 deletions(-)
-
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index ad54a75..1eb9e9e 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -1702,6 +1702,7 @@ def get_node_status(auth_user, cib_dom)
-         'constraint_colocation_set',
-         'sbd',
-         'ticket_constraints',
-+        'moving_resource_in_group',
-       ]
-   }
- 
-diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css
-index d41b164..0d744d5 100644
---- a/pcsd/public/css/style.css
-+++ b/pcsd/public/css/style.css
-@@ -848,3 +848,13 @@ table.args-table td.reg {
- .constraint-ticket-add-attribute {
-   vertical-align: top;
- }
-+
-+.cursor-move {
-+  cursor: move;
-+}
-+
-+.sortable-table td {
-+  height: 1.5em;
-+  line-height: 1.2em;
-+  background: black;
-+}
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index efc0192..3d4fe79 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -52,6 +52,11 @@ Pcs = Ember.Application.createWithMixins({
-       this.get("available_features").indexOf("constraint_colocation_set") != -1
-     );
-   }.property("available_features"),
-+  is_supported_moving_resource_in_group: function() {
-+    return (
-+      this.get("available_features").indexOf("moving_resource_in_group") != -1
-+    );
-+  }.property("available_features"),
-   is_sbd_running: false,
-   is_sbd_enabled: false,
-   is_sbd_enabled_or_running: function() {
-@@ -245,6 +250,154 @@ Pcs = Ember.Application.createWithMixins({
-   }
- });
- 
-+Pcs.GroupSelectorComponent = Ember.Component.extend({
-+  resource_id: null,
-+  resource: function() {
-+    var id = this.get("resource_id");
-+    if (id) {
-+      var resource = Pcs.resourcesContainer.get_resource_by_id(id);
-+      if (resource) {
-+        return resource;
-+      }
-+    }
-+    return null;
-+  }.property("resource_id"),
-+  resource_change: function() {
-+    this._refresh_fn();
-+    this._update_resource_select_content();
-+    this._update_resource_select_value();
-+  }.observes("resource", "resource_id"),
-+  group_list: [],
-+  group_select_content: function() {
-+    var list = [];
-+    $.each(this.get("group_list"), function(_, group) {
-+      list.push({
-+        name: group,
-+        value: group
-+      });
-+    });
-+    return list;
-+  }.property("group_list"),
-+  group_select_value: null,
-+  group: function() {
-+    var id = this.get("group_select_value");
-+    if (id) {
-+      var group = Pcs.resourcesContainer.get_resource_by_id(id);
-+      if (group) {
-+        return group;
-+      }
-+    }
-+    return null;
-+  }.property("group_select_value"),
-+  position_select_content: [
-+    {
-+      name: "before",
-+      value: "before"
-+    },
-+    {
-+      name: "after",
-+      value: "after"
-+    }
-+  ],
-+  position_select_value: null,
-+  position_select_value_changed: function() {
-+  }.observes("position_select_value"),
-+  resource_select_content: [],
-+  resource_select_value: null,
-+  group_select_value_changed: function () {
-+    this._update_resource_select_content();
-+    this._update_resource_select_value();
-+  }.observes("group_select_value"),
-+  actions: {
-+    refresh: function() {
-+      this.set("group_list", Pcs.resourcesContainer.get("group_list"));
-+      this._refresh_fn();
-+      this._update_resource_select_content();
-+      this._update_resource_select_value();
-+    }
-+  },
-+  _refresh_fn: function() {
-+    var id = this.get("resource_id");
-+    if (id) {
-+      var resource = Pcs.resourcesContainer.get_resource_by_id(id);
-+      if (resource) {
-+        var parent = resource.get("parent");
-+        if (parent && parent.get("is_group")) {
-+          this.set("group_select_value", parent.get("id"));
-+          return;
-+        }
-+      }
-+    }
-+    this.set("group_select_value", null);
-+  },
-+  _update_resource_select_content: function() {
-+    var self = this;
-+    var group = self.get("group");
-+    if (!group) {
-+      self.set("resource_select_content", []);
-+      return;
-+    }
-+    var list = [];
-+    var resource_id;
-+    $.each(group.get("members"), function(_, resource) {
-+      resource_id = resource.get("id");
-+      if (resource_id != self.get("resource_id")) {
-+        list.push({
-+          name: resource_id,
-+          value: resource_id
-+        });
-+      }
-+    });
-+    self.set("resource_select_content", list);
-+  },
-+  _update_resource_select_value: function() {
-+    var self = this;
-+    var group = self.get("group");
-+    var resource = self.get("resource");
-+    if (!group) {
-+      self.set("resource_select_value", null);
-+      return;
-+    }
-+    var resource_list = group.get("members");
-+    if (
-+      !resource ||
-+      !resource.get("parent") ||
-+      resource.get("parent").get("id") != group.get("id")
-+    ) {
-+      self.set("position_select_value", "after");
-+      self.set("resource_select_value", resource_list.slice(-1)[0].get("id"));
-+    } else {
-+      var index = resource_list.findIndex(function(item) {
-+        return item.get("id") == resource.get("id");
-+      });
-+      if (index == 0) {
-+        self.set("position_select_value", "before");
-+        self.set(
-+          "resource_select_value",
-+          (resource_list[1]) ? resource_list[1].get("id") : null // second
-+        );
-+      } else if (index == -1) {
-+        self.set("position_select_value", "after");
-+        self.set("resource_select_value", resource_list.slice(-1)[0].get("id"));
-+      } else {
-+        self.set("position_select_value", "after");
-+        self.set("resource_select_value", resource_list[index-1].get("id"));
-+      }
-+    }
-+  },
-+  group_input_name: "group_id",
-+  classNames: "group-selector",
-+  init: function() {
-+    this._super();
-+    if (this.get("resource_id")) {
-+      this.set("group_list", Pcs.resourcesContainer.get("group_list"));
-+    }
-+    this._refresh_fn();
-+    this._update_resource_select_content();
-+    this._update_resource_select_value();
-+  }
-+});
-+
- Pcs.ValueSelectorComponent = Ember.Component.extend({
-   tagName: 'select',
-   attributeBindings: ['name'],
-@@ -682,20 +835,6 @@ Pcs.ResourceObj = Ember.Object.extend({
-     }
-     return null;
-   }.property('parent'),
--  group_selector: function() {
--    var self = this;
--    var cur_group = self.get('get_group_id');
--    var html = '<select>\n<option value="">None</option>\n';
--    $.each(self.get('group_list'), function(_, group) {
--      html += '<option value="' + group + '"';
--      if (cur_group === group) {
--        html += 'selected';
--      }
--      html += '>' + group + '</option>\n';
--    });
--    html += '</select><input type="button" value="Change group" onclick="resource_change_group(curResource(), $(this).prev().prop(\'value\'));">';
--    return html;
--  }.property('group_list', 'get_group_id'),
-   status: "unknown",
-   class_type: null, // property to determine type of the resource
-   resource_type: function() { // this property is just for displaying resource type in GUI
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index a646bed..82187ef 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -96,50 +96,77 @@ function select_menu(menu, item, initial) {
- }
- 
- function create_group() {
--  var num_nodes = 0;
--  var node_names = "";
--  $("#resource_list :checked").parent().parent().each(function (index,element) {
--    if (element.getAttribute("nodeID")) {
--      num_nodes++;
--      node_names += element.getAttribute("nodeID") + " "
--    }
--  });
--
--  if (num_nodes == 0) {
-+  var resource_list = get_checked_ids_from_nodelist("resource_list");
-+  if (resource_list.length == 0) {
-     alert("You must select at least one resource to add to a group");
-     return;
-   }
--
--  $("#resources_to_add_to_group").val(node_names);
-+  var not_primitives = resource_list.filter(function(resource_id) {
-+    return !Pcs.resourcesContainer.get_resource_by_id(resource_id).get(
-+      "is_primitive"
-+    );
-+  });
-+  if (not_primitives.length != 0) {
-+    alert("Members of group have to be primitive resources. These resources" +
-+      " are not primitives: " + not_primitives.join(", "));
-+    return;
-+  }
-+  var order_el = $("#new_group_resource_list tbody");
-+  order_el.empty();
-+  order_el.append(resource_list.map(function (item) {
-+    return `<tr value="${item}" class="cursor-move"><td>${item}</td></tr>`;
-+  }));
-+  var order_obj = order_el.sortable();
-+  order_el.disableSelection();
-   $("#add_group").dialog({
-     title: 'Create Group',
-+    width: 'auto',
-     modal: true,
-     resizable: false,
--    buttons: {
--      Cancel: function() {
--        $(this).dialog("close");
-+    buttons: [
-+      {
-+        text: "Cancel",
-+        click: function() {
-+          $(this).dialog("close");
-+        }
-       },
--      "Create Group": function() {
--        var data = $('#add_group > form').serialize();
--        var url = get_cluster_remote_url() + "add_group";
--        ajax_wrapper({
--          type: "POST",
--          url: url,
--          data: data,
--          success: function() {
--            Pcs.update();
--            $("#add_group").dialog("close");
--          },
--          error: function (xhr, status, error) {
--            alert(
--              "Error creating group "
--              + ajax_simple_error(xhr, status, error)
--            );
--            $("#add_group").dialog("close");
--          }
--        });
-+      {
-+        text: "Create Group",
-+        id: "add_group_submit_btn",
-+        click: function() {
-+          var dialog_obj = $(this);
-+          var submit_btn_obj = dialog_obj.parent().find(
-+            "#add_group_submit_btn"
-+          );
-+          submit_btn_obj.button("option", "disabled", true);
-+
-+          ajax_wrapper({
-+            type: "POST",
-+            url: get_cluster_remote_url() + "add_group",
-+            data: {
-+              resource_group: $(
-+                '#add_group:visible input[name=resource_group]'
-+              ).val(),
-+              resources: order_obj.sortable(
-+                "toArray", {attribute: "value"}
-+              ).join(" ")
-+            },
-+            success: function() {
-+              submit_btn_obj.button("option", "disabled", false);
-+              Pcs.update();
-+              dialog_obj.dialog("close");
-+            },
-+            error: function (xhr, status, error) {
-+              alert(
-+                "Error creating group "
-+                + ajax_simple_error(xhr, status, error)
-+              );
-+              submit_btn_obj.button("option", "disabled", false);
-+            }
-+          });
-+        }
-       }
--    }
-+    ]
-   });
- }
- 
-@@ -2257,24 +2284,24 @@ function resource_ungroup(group_id) {
-   });
- }
- 
--function resource_change_group(resource_id, group_id) {
-+function resource_change_group(resource_id, form) {
-   if (resource_id == null) {
-     return;
-   }
-   show_loading_screen();
-   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
-   var data = {
--    resource_id: resource_id,
--    group_id: group_id
-+    resource_id: resource_id
-   };
-+  $.each($(form).serializeArray(), function(_, item) {
-+    data[item.name] = item.value;
-+  });
- 
--  if (resource_obj.get('parent')) {
--    if (resource_obj.get('parent').get('id') == group_id) {
--      return;
--    }
--    if (resource_obj.get('parent').get('class_type') == 'group') {
--      data['old_group_id'] = resource_obj.get('parent').get('id');
--    }
-+  if (
-+    resource_obj.get('parent') &&
-+    resource_obj.get('parent').get('class_type') == 'group'
-+  ) {
-+    data['old_group_id'] = resource_obj.get('parent').get('id');
-   }
- 
-   ajax_wrapper({
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 05a6d03..4844adf 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -1415,21 +1415,23 @@ def update_resource (params, request, auth_user)
- 
-   param_line = getParamList(params)
-   if not params[:resource_id]
--    out, stderr, retval = run_cmd(
--      auth_user,
--      PCS, "resource", "create", params[:name], params[:resource_type],
--      *param_line
--    )
--    if retval != 0
--      return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
--    end
-+    cmd = [PCS, "resource", "create", params[:name], params[:resource_type]]
-+    cmd += param_line
-     if params[:resource_group] and params[:resource_group] != ""
--      run_cmd(
--        auth_user,
--        PCS, "resource","group", "add", params[:resource_group], params[:name]
-+      cmd += ['--group', params[:resource_group]]
-+      if (
-+        ['before', 'after'].include?(params[:in_group_position]) and
-+        params[:in_group_reference_resource_id]
-       )
-+        cmd << "--#{params[:in_group_position]}"
-+        cmd << params[:in_group_reference_resource_id]
-+      end
-       resource_group = params[:resource_group]
-     end
-+    out, stderr, retval = run_cmd(auth_user, *cmd)
-+    if retval != 0
-+      return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
-+    end
- 
-     if params[:resource_clone] and params[:resource_clone] != ""
-       name = resource_group ? resource_group : params[:name]
-@@ -1461,10 +1463,18 @@ def update_resource (params, request, auth_user)
-         )
-       end
-     else
--      run_cmd(
--        auth_user, PCS, "resource", "group", "add", params[:resource_group],
-+      cmd = [
-+        PCS, "resource", "group", "add", params[:resource_group],
-         params[:resource_id]
-+      ]
-+      if (
-+        ['before', 'after'].include?(params[:in_group_position]) and
-+        params[:in_group_reference_resource_id]
-       )
-+        cmd << "--#{params[:in_group_position]}"
-+        cmd << params[:in_group_reference_resource_id]
-+      end
-+      run_cmd(auth_user, *cmd)
-     end
-   end
- 
-@@ -2098,10 +2108,17 @@ def resource_change_group(params, request, auth_user)
-     end
-     return 200
-   end
--  _, stderr, retval = run_cmd(
--    auth_user,
-+  cmd = [
-     PCS, 'resource', 'group', 'add', params[:group_id], params[:resource_id]
-+  ]
-+  if (
-+  ['before', 'after'].include?(params[:in_group_position]) and
-+    params[:in_group_reference_resource_id]
-   )
-+    cmd << "--#{params[:in_group_position]}"
-+    cmd << params[:in_group_reference_resource_id]
-+  end
-+  _, stderr, retval = run_cmd(auth_user, *cmd)
-   if retval != 0
-     return [400, "Unable to add resource '#{params[:resource_id]}' to " +
-       "group '#{params[:group_id]}': #{stderr.join('')}"
-diff --git a/pcsd/views/_dialogs.erb b/pcsd/views/_dialogs.erb
-index 46e7fdb..d18ac71 100644
---- a/pcsd/views/_dialogs.erb
-+++ b/pcsd/views/_dialogs.erb
-@@ -215,3 +215,24 @@
-   </table>
-   {{/if}}
- </div>
-+
-+<div id="add_group" style="display: none;">
-+  <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add">
-+    <table>
-+      <tr>
-+        <td>Group Name:</td>
-+        <td>
-+          <input name="resource_group" type="text" />
-+        </td>
-+      </tr>
-+      <tr>
-+        <td style="vertical-align: top;">Change order of resources:</td>
-+        <td>
-+          <table id="new_group_resource_list" class="sortable-table">
-+            <tbody></tbody>
-+          </table>
-+        </td>
-+      </tr>
-+    </table>
-+  </form>
-+</div>
-diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb
-index a337160..ad2251c 100644
---- a/pcsd/views/_resource.erb
-+++ b/pcsd/views/_resource.erb
-@@ -116,10 +116,4 @@
-           table_id_suffix="_new"
-       }}
-     </div>
--    <div id="add_group" style="display: none;">
--      <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add">
--	<p style="font-size:12px;">Group Name:</p><input name="resource_group" type=text>
--	<input id="resources_to_add_to_group"  type=hidden name="resources" value="">
--      </form>
--    </div>
-     <% end %>
-diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
-index 52c1900..1b21f92 100644
---- a/pcsd/views/main.erb
-+++ b/pcsd/views/main.erb
-@@ -237,7 +237,7 @@
-             <tr>
-               <td class="bold" nowrap>Group:</td>
-               <td id="cur_res_loc" class="reg">
--                {{{resource.group_selector}}}
-+                {{group-selector resource_id=resource._id}}
-               </td>
-             </tr>
-           {{else}}
-@@ -245,7 +245,7 @@
-             <tr>
-               <td class="bold" nowrap>Group:</td>
-               <td id="cur_res_loc" class="reg">
--                {{{resource.group_selector}}}
-+                {{group-selector resource_id=resource._id}}
-               </td>
-             </tr>
-             {{/if}}
-@@ -909,10 +909,9 @@ Use the 'Add' button to submit the form.">
-                     </div>
-                   </td>
-                   <td>
--                    {{value-selector
--                        prompt="None"
--                        content=groups
--                        name="resource_group"
-+                    {{group-selector
-+                        group_list=Pcs.resourcesContainer.group_list
-+                        group_input_name="resource_group"
-                     }}
-                   </td>
-                 </tr>
-@@ -1095,6 +1094,46 @@ Use the 'Add' button to submit the form.">
-     </td>
-   </script>
- 
-+  <script type="text/x-handlebars" data-template-name="components/group-selector">
-+    {{value-selector
-+        name=group_input_name
-+        content=group_select_content
-+        value=group_select_value
-+        prompt="None"
-+    }}
-+    {{#if Pcs.is_supported_moving_resource_in_group}}
-+    {{#if group_select_value}}
-+    {{#if resource_select_content}}
-+      {{value-selector
-+          name="in_group_position"
-+          content=position_select_content
-+          value=position_select_value
-+          prompt=""
-+      }}
-+      {{value-selector
-+          name="in_group_reference_resource_id"
-+          content=resource_select_content
-+          value=resource_select_value
-+          prompt=""
-+      }}
-+    {{/if}}
-+    {{/if}}
-+    {{/if}}
-+    {{#if resource_id}}
-+      <br/>
-+      <button
-+        onclick="
-+          resource_change_group(curResource(), $(this).parent().find('select'));
-+          return false;
-+        "
-+      >
-+        Update group
-+      </button>
-+      <button {{action refresh}}>Refresh</button>
-+    {{/if}}
-+
-+  </script>
-+
-   <script type="text/x-handlebars">
- <div id="wrapper">
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch b/SOURCES/bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch
deleted file mode 100644
index df08815..0000000
--- a/SOURCES/bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From d0731ed0ccbcb24e2bc080dba6ba05a8eb0eecc4 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Tue, 13 Sep 2016 09:02:22 +0200
-Subject: [PATCH] web UI: reset selected group when displaying new resource
- dialog
-
----
- pcsd/public/js/pcsd.js   | 13 +++++++++++++
- pcsd/views/_resource.erb |  2 +-
- 2 files changed, 14 insertions(+), 1 deletion(-)
-
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index 67a0bdb..371b76b 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -3046,3 +3046,16 @@ function manage_resource(resource_id) {
-     }
-   });
- }
-+
-+function show_add_resource_dialog() {
-+  var new_resource_group_selector_id = $(
-+    "#new_resource_agent .group-selector"
-+  ).attr("id");
-+  Ember.View.views[new_resource_group_selector_id].set(
-+    "group_select_value", null
-+  );
-+  $('#new_resource_agent').dialog({
-+    title: 'Add Resource',
-+    modal:true, width: 'auto'
-+  });
-+}
-diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb
-index ad2251c..86e5567 100644
---- a/pcsd/views/_resource.erb
-+++ b/pcsd/views/_resource.erb
-@@ -14,7 +14,7 @@
-       Remove</a>&nbsp;&nbsp;&nbsp;&nbsp;</div>
-   <div class="plus sprites"></div><div class="link">&nbsp;
-     <% if @myView == "resource" %>
--      <a href="#" onclick="$('#new_resource_agent').dialog({title: 'Add Resource', modal:true, width: 'auto'});return false;">
-+      <a href="#" onclick="show_add_resource_dialog();return false;">
-     <% else %>
-       <a href="#" onclick="$('#new_stonith_agent').dialog({title: 'Add Fence Device', modal:true, width: 'auto'});return false;">
-     <% end %>
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1284404-01-web-UI-fix-creating-a-new-cluster.patch b/SOURCES/bz1284404-01-web-UI-fix-creating-a-new-cluster.patch
new file mode 100644
index 0000000..f1b95f1
--- /dev/null
+++ b/SOURCES/bz1284404-01-web-UI-fix-creating-a-new-cluster.patch
@@ -0,0 +1,138 @@
+From e0496566d2634ee6e37939a7fd9b2ee25539df46 Mon Sep 17 00:00:00 2001
+From: Ondrej Mular <omular@redhat.com>
+Date: Tue, 30 May 2017 16:46:48 +0200
+Subject: [PATCH] web UI: fix creating a new cluster
+
+---
+ pcs/cli/common/parse_args.py |  2 +-
+ pcs/cluster.py               |  5 ++++-
+ pcs/pcsd.py                  | 12 +++++++++---
+ pcs/utils.py                 |  1 +
+ pcsd/pcs.rb                  |  3 ++-
+ pcsd/remote.rb               |  4 ++--
+ 6 files changed, 19 insertions(+), 8 deletions(-)
+
+diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
+index 465cb96..e2250c7 100644
+--- a/pcs/cli/common/parse_args.py
++++ b/pcs/cli/common/parse_args.py
+@@ -17,7 +17,7 @@ PCS_SHORT_OPTIONS = "hf:p:u:V"
+ PCS_LONG_OPTIONS = [
+     "debug", "version", "help", "fullhelp",
+     "force", "skip-offline", "autocorrect", "interactive", "autodelete",
+-    "all", "full", "groups", "local", "wait", "config",
++    "all", "full", "groups", "local", "wait", "config", "async",
+     "start", "enable", "disabled", "off", "request-timeout=",
+     "pacemaker", "corosync",
+     "no-default-ops", "defaults", "nodesc",
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index b47db4a..0fc5e2c 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -298,6 +298,7 @@ def cluster_certkey(argv):
+ 
+ 
+ def cluster_setup(argv):
++    modifiers = utils.get_modificators()
+     if len(argv) < 2:
+         usage.cluster(["setup"])
+         sys.exit(1)
+@@ -515,7 +516,9 @@ def cluster_setup(argv):
+ 
+         # sync certificates as the last step because it restarts pcsd
+         print()
+-        pcsd.pcsd_sync_certs([], exit_after_error=False)
++        pcsd.pcsd_sync_certs(
++            [], exit_after_error=False, async_restart=modifiers["async"]
++        )
+         if wait:
+             print()
+             wait_for_nodes_started(primary_addr_list, wait_timeout)
+diff --git a/pcs/pcsd.py b/pcs/pcsd.py
+index 629b4c0..7f7c660 100644
+--- a/pcs/pcsd.py
++++ b/pcs/pcsd.py
+@@ -79,7 +79,7 @@ def pcsd_certkey(argv):
+ 
+     print("Certificate and key updated, you may need to restart pcsd (service pcsd restart) for new settings to take effect")
+ 
+-def pcsd_sync_certs(argv, exit_after_error=True):
++def pcsd_sync_certs(argv, exit_after_error=True, async_restart=False):
+     error = False
+     nodes_sync = argv if argv else utils.getNodesFromCorosyncConf()
+     nodes_restart = []
+@@ -117,7 +117,9 @@ def pcsd_sync_certs(argv, exit_after_error=True):
+         return
+ 
+     print("Restarting pcsd on the nodes in order to reload the certificates...")
+-    pcsd_restart_nodes(nodes_restart, exit_after_error)
++    pcsd_restart_nodes(
++        nodes_restart, exit_after_error, async_restart=async_restart
++    )
+ 
+ def pcsd_clear_auth(argv):
+     output = []
+@@ -148,7 +150,7 @@ def pcsd_clear_auth(argv):
+             print("Error: " + o)
+         sys.exit(1)
+ 
+-def pcsd_restart_nodes(nodes, exit_after_error=True):
++def pcsd_restart_nodes(nodes, exit_after_error=True, async_restart=False):
+     pcsd_data = {
+         "nodes": nodes,
+     }
+@@ -188,6 +190,10 @@ def pcsd_restart_nodes(nodes, exit_after_error=True):
+         utils.err("Unable to restart pcsd", exit_after_error)
+         return
+ 
++    if async_restart:
++        print("Not waiting for restart of pcsd on all nodes.")
++        return
++
+     # check if the restart was performed already
+     error = False
+     for _ in range(5):
+diff --git a/pcs/utils.py b/pcs/utils.py
+index 4753b87..6515e5f 100644
+--- a/pcs/utils.py
++++ b/pcs/utils.py
+@@ -2870,6 +2870,7 @@ def get_modificators():
+     return {
+         "after": pcs_options.get("--after", None),
+         "all": "--all" in pcs_options,
++        "async": "--async" in pcs_options,
+         "autocorrect": "--autocorrect" in pcs_options,
+         "autodelete": "--autodelete" in pcs_options,
+         "before": pcs_options.get("--before", None),
+diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
+index 930b4a0..9764a43 100644
+--- a/pcsd/pcs.rb
++++ b/pcsd/pcs.rb
+@@ -1034,7 +1034,8 @@ def pcsd_restart()
+   # request
+   fork {
+     # let us send the response to the restart request
+-    sleep(3)
++    # we need little bit more time to finish some things when setting up cluster
++    sleep(10)
+     if ISSYSTEMCTL
+       exec("systemctl", "restart", "pcsd")
+     else
+diff --git a/pcsd/remote.rb b/pcsd/remote.rb
+index 005d45e..f353980 100644
+--- a/pcsd/remote.rb
++++ b/pcsd/remote.rb
+@@ -965,8 +965,8 @@ def setup_cluster(params, request, auth_user)
+   nodes_options = nodes + options
+   nodes_options += options_udp if transport_udp
+   stdout, stderr, retval = run_cmd(
+-    auth_user, PCS, "cluster", "setup", "--enable", "--start",
+-    "--name", params[:clustername], *nodes_options
++    auth_user, PCS, "cluster", "setup", "--enable", "--start", "--async",
++    "--name",  params[:clustername], *nodes_options
+   )
+   if retval != 0
+     return [
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1284404-02-web-ui-fix-timeout-when-cluster-setup-takes-long.patch b/SOURCES/bz1284404-02-web-ui-fix-timeout-when-cluster-setup-takes-long.patch
new file mode 100644
index 0000000..e6dff97
--- /dev/null
+++ b/SOURCES/bz1284404-02-web-ui-fix-timeout-when-cluster-setup-takes-long.patch
@@ -0,0 +1,44 @@
+From 1f64220204383bea38c52d5e96e0c5ba05e98ccb Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Thu, 15 Jun 2017 11:46:12 +0200
+Subject: [PATCH] web UI: fix timeout when cluster setup takes long
+
+---
+ pcsd/public/js/pcsd.js | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
+index b7ad72f..4754139 100644
+--- a/pcsd/public/js/pcsd.js
++++ b/pcsd/public/js/pcsd.js
+@@ -1080,7 +1080,7 @@ function update_create_cluster_dialog(nodes, version_info) {
+     ajax_wrapper({
+       type: "POST",
+       url: "/manage/newcluster",
+-      timeout: pcs_timeout,
++      timeout: 60*1000,
+       data: $('#create_new_cluster_form').serialize(),
+       success: function(data) {
+         if (data) {
+@@ -1090,7 +1090,17 @@ function update_create_cluster_dialog(nodes, version_info) {
+         Pcs.update();
+       },
+       error: function (xhr, status, error) {
+-        alert(xhr.responseText);
++        var err_msg = "";
++        if ((status == "timeout") || ($.trim(error) == "timeout")) {
++          err_msg = (
++            "Operation takes longer to complete than expected. " +
++            "It may continue running in the background. Later, you can try " +
++            "to add this cluster as existing one."
++          );
++        } else {
++          err_msg = xhr.responseText;
++        }
++        alert(err_msg);
+         $("#create_cluster_submit_btn").button("option", "disabled", false);
+       }
+     });
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch b/SOURCES/bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch
deleted file mode 100644
index f0b86f4..0000000
--- a/SOURCES/bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch
+++ /dev/null
@@ -1,554 +0,0 @@
-From 5d8bab038a7aa64c38b79e5de9579af4c73e70a2 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Thu, 14 Jul 2016 17:04:04 +0200
-Subject: [PATCH] fixes for pcs cli running on a remote node
-
----
- pcs/acl.py             |   2 +-
- pcs/cluster.py         |  13 ++++++-
- pcs/config.py          |  37 +++++++++++++-----
- pcs/constraint.py      |   2 +-
- pcs/prop.py            |  16 +-------
- pcs/quorum.py          |   3 +-
- pcs/status.py          |  29 +++++---------
- pcs/stonith.py         |  12 +++++-
- pcs/utils.py           | 104 +++++++++++++++++++++++++++++++------------------
- pcsd/cluster_entity.rb |   4 +-
- pcsd/pcs.rb            |   4 ++
- pcsd/remote.rb         |  14 ++++++-
- 12 files changed, 149 insertions(+), 91 deletions(-)
-
-diff --git a/pcs/acl.py b/pcs/acl.py
-index 118ceed..0378c10 100644
---- a/pcs/acl.py
-+++ b/pcs/acl.py
-@@ -55,7 +55,7 @@ def acl_cmd(argv):
- def acl_show(argv):
-     dom = utils.get_cib_dom()
- 
--    properties = prop.get_set_properties(defaults=prop.get_default_properties())
-+    properties = utils.get_set_properties(defaults=prop.get_default_properties())
-     acl_enabled = properties.get("enable-acl", "").lower()
-     if is_true(acl_enabled):
-         print("ACLs are enabled")
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 4155103..13446d4 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1157,7 +1157,18 @@ def stop_cluster_corosync():
-                 utils.err("unable to stop {0}".format(service))
- 
- def kill_cluster(argv):
--    daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"]
-+    daemons = [
-+        "crmd",
-+        "pengine",
-+        "attrd",
-+        "lrmd",
-+        "stonithd",
-+        "cib",
-+        "pacemakerd",
-+        "pacemaker_remoted",
-+        "corosync-qdevice",
-+        "corosync",
-+    ]
-     dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons)
- #    if dummy_retval != 0:
- #        print "Error: unable to execute killall -9"
-diff --git a/pcs/config.py b/pcs/config.py
-index 3d86b39..9119c3c 100644
---- a/pcs/config.py
-+++ b/pcs/config.py
-@@ -95,14 +95,22 @@ def config_show(argv):
-     print()
-     config_show_cib()
-     if (
--        utils.is_rhel6()
--        or
--        (not utils.usefile and "--corosync_conf" not in utils.pcs_options)
-+        utils.hasCorosyncConf()
-+        and
-+        (
-+            utils.is_rhel6()
-+            or
-+            (not utils.usefile and "--corosync_conf" not in utils.pcs_options)
-+        )
-     ):
-         # with corosync 1 and cman, uid gid is part of cluster.conf file
-         # with corosync 2, uid gid is in a separate directory
-         cluster.cluster_uidgid([], True)
--    if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6():
-+    if (
-+        "--corosync_conf" in utils.pcs_options
-+        or
-+        (not utils.is_rhel6() and utils.hasCorosyncConf())
-+    ):
-         print()
-         print("Quorum:")
-         try:
-@@ -267,7 +275,16 @@ def config_restore_remote(infile_name, infile_obj):
-                 err_msgs.append(output)
-                 continue
-             status = json.loads(output)
--            if status["corosync"] or status["pacemaker"] or status["cman"]:
-+            if (
-+                status["corosync"]
-+                or
-+                status["pacemaker"]
-+                or
-+                status["cman"]
-+                or
-+                # not supported by older pcsd, do not fail if not present
-+                status.get("pacemaker_remote", False)
-+            ):
-                 err_msgs.append(
-                     "Cluster is currently running on node %s. You need to stop "
-                         "the cluster in order to restore the configuration."
-@@ -286,7 +303,7 @@ def config_restore_remote(infile_name, infile_obj):
-     # If node returns HTTP 404 it does not support config syncing at all.
-     for node in node_list:
-         retval, output = utils.pauseConfigSyncing(node, 10 * 60)
--        if not (retval == 0 or output.endswith("(HTTP error: 404)")):
-+        if not (retval == 0 or "(HTTP error: 404)" in output):
-             utils.err(output)
- 
-     if infile_obj:
-@@ -306,11 +323,13 @@ def config_restore_remote(infile_name, infile_obj):
- 
- def config_restore_local(infile_name, infile_obj):
-     if (
--        status.is_cman_running()
-+        status.is_service_running("cman")
-+        or
-+        status.is_service_running("corosync")
-         or
--        status.is_corosyc_running()
-+        status.is_service_running("pacemaker")
-         or
--        status.is_pacemaker_running()
-+        status.is_service_running("pacemaker_remote")
-     ):
-         utils.err(
-             "Cluster is currently running on this node. You need to stop "
-diff --git a/pcs/constraint.py b/pcs/constraint.py
-index 5d9b0df..e32f1a3 100644
---- a/pcs/constraint.py
-+++ b/pcs/constraint.py
-@@ -593,7 +593,7 @@ def location_show(argv):
-             print("  Node: " + node)
- 
-             nodehash_label = (
--                (nodehashon, "    Allowed to run:")
-+                (nodehashon, "    Allowed to run:"),
-                 (nodehashoff, "    Not allowed to run:")
-             )
-             for nodehash, label in nodehash_label:
-diff --git a/pcs/prop.py b/pcs/prop.py
-index 3a65990..36eba60 100644
---- a/pcs/prop.py
-+++ b/pcs/prop.py
-@@ -7,7 +7,6 @@ from __future__ import (
- 
- import sys
- import json
--from xml.dom.minidom import parseString
- 
- from pcs import usage
- from pcs import utils
-@@ -116,7 +115,7 @@ def list_property(argv):
-         properties = {}
- 
-     if "--defaults" not in utils.pcs_options:
--        properties = get_set_properties(
-+        properties = utils.get_set_properties(
-             None if print_all else argv[0],
-             properties
-         )
-@@ -141,16 +140,3 @@ def get_default_properties():
-         parameters[name] = prop["default"]
-     return parameters
- 
--def get_set_properties(prop_name=None, defaults=None):
--    properties = {} if defaults is None else dict(defaults)
--    (output, retVal) = utils.run(["cibadmin","-Q","--scope", "crm_config"])
--    if retVal != 0:
--        utils.err("unable to get crm_config\n"+output)
--    dom = parseString(output)
--    de = dom.documentElement
--    crm_config_properties = de.getElementsByTagName("nvpair")
--    for prop in crm_config_properties:
--        if prop_name is None or (prop_name == prop.getAttribute("name")):
--            properties[prop.getAttribute("name")] = prop.getAttribute("value")
--    return properties
--
-diff --git a/pcs/quorum.py b/pcs/quorum.py
-index a849282..1c2d41d 100644
---- a/pcs/quorum.py
-+++ b/pcs/quorum.py
-@@ -8,7 +8,6 @@ from __future__ import (
- import sys
- 
- from pcs import (
--    prop,
-     stonith,
-     usage,
-     utils,
-@@ -234,7 +233,7 @@ def quorum_unblock_cmd(argv):
-         utils.err("unable to cancel waiting for nodes")
-     print("Quorum unblocked")
- 
--    startup_fencing = prop.get_set_properties().get("startup-fencing", "")
-+    startup_fencing = utils.get_set_properties().get("startup-fencing", "")
-     utils.set_cib_property(
-         "startup-fencing",
-         "false" if startup_fencing.lower() != "false" else "true"
-diff --git a/pcs/status.py b/pcs/status.py
-index bdfcc85..86216ea 100644
---- a/pcs/status.py
-+++ b/pcs/status.py
-@@ -103,7 +103,7 @@ def full_status():
-     print(output)
- 
-     if not utils.usefile:
--        if  "--full" in utils.pcs_options:
-+        if  "--full" in utils.pcs_options and utils.hasCorosyncConf():
-             print_pcsd_daemon_status()
-             print()
-         utils.serviceStatus("  ")
-@@ -121,7 +121,10 @@ def nodes_status(argv):
-         return
- 
-     if len(argv) == 1 and (argv[0] == "config"):
--        corosync_nodes = utils.getNodesFromCorosyncConf()
-+        if utils.hasCorosyncConf():
-+            corosync_nodes = utils.getNodesFromCorosyncConf()
-+        else:
-+            corosync_nodes = []
-         try:
-             pacemaker_nodes = sorted([
-                 node.attrs.name for node
-@@ -244,7 +247,7 @@ def cluster_status(argv):
-         else:
-             print("",line)
- 
--    if not utils.usefile:
-+    if not utils.usefile and utils.hasCorosyncConf():
-         print()
-         print_pcsd_daemon_status()
- 
-@@ -262,25 +265,11 @@ def xml_status():
-         utils.err("running crm_mon, is pacemaker running?")
-     print(output, end="")
- 
--def is_cman_running():
--    if utils.is_systemctl():
--        dummy_output, retval = utils.run(["systemctl", "status", "cman.service"])
--    else:
--        dummy_output, retval = utils.run(["service", "cman", "status"])
--    return retval == 0
--
--def is_corosyc_running():
--    if utils.is_systemctl():
--        dummy_output, retval = utils.run(["systemctl", "status", "corosync.service"])
--    else:
--        dummy_output, retval = utils.run(["service", "corosync", "status"])
--    return retval == 0
--
--def is_pacemaker_running():
-+def is_service_running(service):
-     if utils.is_systemctl():
--        dummy_output, retval = utils.run(["systemctl", "status", "pacemaker.service"])
-+        dummy_output, retval = utils.run(["systemctl", "status", service])
-     else:
--        dummy_output, retval = utils.run(["service", "pacemaker", "status"])
-+        dummy_output, retval = utils.run(["service", service, "status"])
-     return retval == 0
- 
- def print_pcsd_daemon_status():
-diff --git a/pcs/stonith.py b/pcs/stonith.py
-index ab9e926..c02f35a 100644
---- a/pcs/stonith.py
-+++ b/pcs/stonith.py
-@@ -225,7 +225,11 @@ def stonith_level_add(level, node, devices):
-         for dev in devices.split(","):
-             if not utils.is_stonith_resource(dev):
-                 utils.err("%s is not a stonith id (use --force to override)" % dev)
--        if not utils.is_pacemaker_node(node) and not utils.is_corosync_node(node):
-+        corosync_nodes = []
-+        if utils.hasCorosyncConf():
-+            corosync_nodes = utils.getNodesFromCorosyncConf()
-+        pacemaker_nodes = utils.getNodesFromPacemaker()
-+        if node not in corosync_nodes and node not in pacemaker_nodes:
-             utils.err("%s is not currently a node (use --force to override)" % node)
- 
-     ft = dom.getElementsByTagName("fencing-topology")
-@@ -321,6 +325,10 @@ def stonith_level_clear(node = None):
- 
- def stonith_level_verify():
-     dom = utils.get_cib_dom()
-+    corosync_nodes = []
-+    if utils.hasCorosyncConf():
-+        corosync_nodes = utils.getNodesFromCorosyncConf()
-+    pacemaker_nodes = utils.getNodesFromPacemaker()
- 
-     fls = dom.getElementsByTagName("fencing-level")
-     for fl in fls:
-@@ -329,7 +337,7 @@ def stonith_level_verify():
-         for dev in devices.split(","):
-             if not utils.is_stonith_resource(dev):
-                 utils.err("%s is not a stonith id" % dev)
--        if not utils.is_corosync_node(node) and not utils.is_pacemaker_node(node):
-+        if node not in corosync_nodes and node not in pacemaker_nodes:
-             utils.err("%s is not currently a node" % node)
- 
- def stonith_level_show():
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 2cfb693..3970eff 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -301,6 +301,8 @@ def canAddNodeToCluster(node):
-                 return (False, "unable to authenticate to node")
-             if "node_available" in myout and myout["node_available"] == True:
-                 return (True, "")
-+            elif myout.get("pacemaker_remote", False):
-+                return (False, "node is running pacemaker_remote")
-             else:
-                 return (False, "node is already in a cluster")
-         except ValueError:
-@@ -465,6 +467,14 @@ def getNodesFromPacemaker():
-     except LibraryError as e:
-         process_library_reports(e.args)
- 
-+def hasCorosyncConf(conf=None):
-+    if not conf:
-+        if is_rhel6():
-+            conf = settings.cluster_conf_file
-+        else:
-+            conf = settings.corosync_conf_file
-+    return os.path.isfile(conf)
-+
- def getCorosyncConf(conf=None):
-     if not conf:
-         if is_rhel6():
-@@ -1071,18 +1081,6 @@ def does_exist(xpath_query):
-         return False
-     return True
- 
--def is_pacemaker_node(node):
--    p_nodes = getNodesFromPacemaker()
--    if node in p_nodes:
--        return True
--    return False
--
--def is_corosync_node(node):
--    c_nodes = getNodesFromCorosyncConf()
--    if node in c_nodes:
--        return True
--    return False
--
- def get_group_children(group_id):
-     child_resources = []
-     dom = get_cib_dom()
-@@ -1838,7 +1836,7 @@ def getCorosyncNodesID(allow_failure=False):
-         err_msgs, retval, output, dummy_std_err = call_local_pcsd(
-             ['status', 'nodes', 'corosync-id'], True
-         )
--        if err_msgs:
-+        if err_msgs and not allow_failure:
-             for msg in err_msgs:
-                 err(msg, False)
-             sys.exit(1)
-@@ -1866,6 +1864,7 @@ def getCorosyncNodesID(allow_failure=False):
- 
- # Warning, if a node has never started the hostname may be '(null)'
- #TODO This doesn't work on CMAN clusters at all and should be removed completely
-+# Doesn't work on pacemaker-remote nodes either
- def getPacemakerNodesID(allow_failure=False):
-     if os.getuid() == 0:
-         (output, retval) = run(['crm_node', '-l'])
-@@ -1873,7 +1872,7 @@ def getPacemakerNodesID(allow_failure=False):
-         err_msgs, retval, output, dummy_std_err = call_local_pcsd(
-             ['status', 'nodes', 'pacemaker-id'], True
-         )
--        if err_msgs:
-+        if err_msgs and not allow_failure:
-             for msg in err_msgs:
-                 err(msg, False)
-             sys.exit(1)
-@@ -1893,9 +1892,11 @@ def getPacemakerNodesID(allow_failure=False):
-     return pm_nodes
- 
- def corosyncPacemakerNodeCheck():
--    # does not work on CMAN clusters
--    pm_nodes = getPacemakerNodesID()
--    cs_nodes = getCorosyncNodesID()
-+    # does not work on CMAN clusters and pacemaker-remote nodes
-+    # we do not want a failure to exit pcs as this is only a minor information
-+    # function
-+    pm_nodes = getPacemakerNodesID(allow_failure=True)
-+    cs_nodes = getCorosyncNodesID(allow_failure=True)
- 
-     for node_id in pm_nodes:
-         if pm_nodes[node_id] == "(null)":
-@@ -1920,10 +1921,9 @@ def getClusterName():
-     if is_rhel6():
-         try:
-             dom = parse(settings.cluster_conf_file)
-+            return dom.documentElement.getAttribute("name")
-         except (IOError,xml.parsers.expat.ExpatError):
--            return ""
--
--        return dom.documentElement.getAttribute("name")
-+            pass
-     else:
-         try:
-             f = open(settings.corosync_conf_file,'r')
-@@ -1937,7 +1937,15 @@ def getClusterName():
-             if cluster_name:
-                 return cluster_name
-         except (IOError, corosync_conf_parser.CorosyncConfParserException):
--            return ""
-+            pass
-+
-+    # there is no corosync.conf or cluster.conf on remote nodes, we can try to
-+    # get cluster name from pacemaker
-+    try:
-+        return get_set_properties("cluster-name")["cluster-name"]
-+    except:
-+        # we need to catch SystemExit (from utils.err), parse errors and so on
-+        pass
- 
-     return ""
- 
-@@ -2024,23 +2032,30 @@ def serviceStatus(prefix):
-     if not is_systemctl():
-         return
-     print("Daemon Status:")
--    for service in ["corosync", "pacemaker", "pcsd"]:
--        print('{0}{1}: {2}/{3}'.format(
--            prefix, service,
--            run(["systemctl", 'is-active', service])[0].strip(),
--            run(["systemctl", 'is-enabled', service])[0].strip()
--        ))
--    try:
--        sbd_running = is_service_running(cmd_runner(), "sbd")
--        sbd_enabled = is_service_enabled(cmd_runner(), "sbd")
--        if sbd_enabled or sbd_running:
--            print("{prefix}sbd: {active}/{enabled}".format(
--                prefix=prefix,
--                active=("active" if sbd_running else "inactive"),
--                enabled=("enabled" if sbd_enabled else "disabled")
--            ))
--    except LibraryError:
--        pass
-+    service_def = [
-+        # (
-+        #     service name,
-+        #     display even if not enabled nor running
-+        # )
-+        ("corosync", True),
-+        ("pacemaker", True),
-+        ("pacemaker_remote", False),
-+        ("pcsd", True),
-+        ("sbd", False),
-+    ]
-+    for service, display_always in service_def:
-+        try:
-+            running = is_service_running(cmd_runner(), service)
-+            enabled = is_service_enabled(cmd_runner(), service)
-+            if display_always or enabled or running:
-+                print("{prefix}{service}: {active}/{enabled}".format(
-+                    prefix=prefix,
-+                    service=service,
-+                    active=("active" if running else "inactive"),
-+                    enabled=("enabled" if enabled else "disabled")
-+                ))
-+        except LibraryError:
-+            pass
- 
- def enableServices():
-     # do NOT handle SBD in here, it is started by pacemaker not systemd or init
-@@ -2677,3 +2692,16 @@ def exit_on_cmdline_input_errror(error, main_name, usage_name):
- 
- def get_report_processor():
-     return LibraryReportProcessorToConsole(debug=("--debug" in pcs_options))
-+
-+def get_set_properties(prop_name=None, defaults=None):
-+    properties = {} if defaults is None else dict(defaults)
-+    (output, retVal) = run(["cibadmin","-Q","--scope", "crm_config"])
-+    if retVal != 0:
-+        err("unable to get crm_config\n"+output)
-+    dom = parseString(output)
-+    de = dom.documentElement
-+    crm_config_properties = de.getElementsByTagName("nvpair")
-+    for prop in crm_config_properties:
-+        if prop_name is None or (prop_name == prop.getAttribute("name")):
-+            properties[prop.getAttribute("name")] = prop.getAttribute("value")
-+    return properties
-diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
-index f54cd30..fa56fe2 100644
---- a/pcsd/cluster_entity.rb
-+++ b/pcsd/cluster_entity.rb
-@@ -1011,7 +1011,9 @@ module ClusterEntity
-       @uptime = 'unknown'
-       @name = nil
-       @services = {}
--      [:pacemaker, :corosync, :pcsd, :cman, :sbd].each do |service|
-+      [
-+        :pacemaker, :pacemaker_remote, :corosync, :pcsd, :cman, :sbd
-+      ].each do |service|
-         @services[service] = {
-           :installed => nil,
-           :running => nil,
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 57082be..0956de9 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -892,6 +892,10 @@ def pacemaker_running?()
-   is_service_running?('pacemaker')
- end
- 
-+def pacemaker_remote_running?()
-+  is_service_running?('pacemaker_remote')
-+end
-+
- def get_pacemaker_version()
-   begin
-     stdout, stderror, retval = run_cmd(
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 75c9465..6a3a692 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -769,9 +769,19 @@ def get_sw_versions(params, request, auth_user)
- end
- 
- def remote_node_available(params, request, auth_user)
--  if (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or File.exist?("/var/lib/pacemaker/cib/cib.xml")
-+  if (
-+    (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or
-+    (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or
-+    File.exist?("/var/lib/pacemaker/cib/cib.xml")
-+  )
-     return JSON.generate({:node_available => false})
-   end
-+  if pacemaker_remote_running?()
-+    return JSON.generate({
-+      :node_available => false,
-+      :pacemaker_remote => true,
-+    })
-+  end
-   return JSON.generate({:node_available => true})
- end
- 
-@@ -1038,6 +1048,8 @@ def node_status(params, request, auth_user)
-     :cman => node.cman,
-     :corosync_enabled => node.corosync_enabled,
-     :pacemaker_enabled => node.pacemaker_enabled,
-+    :pacemaker_remote => node.services[:pacemaker_remote][:running],
-+    :pacemaker_remote_enabled => node.services[:pacemaker_remote][:enabled],
-     :pcsd_enabled => node.pcsd_enabled,
-     :corosync_online => status[:corosync_online],
-     :corosync_offline => status[:corosync_offline],
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch b/SOURCES/bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch
deleted file mode 100644
index 43c4dd3..0000000
--- a/SOURCES/bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch
+++ /dev/null
@@ -1,221 +0,0 @@
-From 0cfbd1bd87d4484eca054d41aea1d8ac9b55e93c Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Mon, 8 Aug 2016 13:32:07 +0200
-Subject: [PATCH] add possibility to hide inactive resources in "pcs resource
- show"
-
----
- .pylintrc                 |  2 +-
- pcs/pcs.8                 | 12 ++++++------
- pcs/resource.py           | 33 +++++++++++++++++++++++++++------
- pcs/test/test_resource.py | 21 ++++++++++++++-------
- pcs/usage.py              | 16 ++++++++++------
- 5 files changed, 58 insertions(+), 26 deletions(-)
-
-diff --git a/.pylintrc b/.pylintrc
-index e378e6a..1dd6d5d 100644
---- a/.pylintrc
-+++ b/.pylintrc
-@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy
- 
- [FORMAT]
- # Maximum number of lines in a module
--max-module-lines=4577
-+max-module-lines=4584
- # Maximum number of characters on a single line.
- max-line-length=1291
- 
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 52497a0..9064054 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -64,8 +64,8 @@ alert
- Manage pacemaker alerts.
- .SS "resource"
- .TP
--[show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR]
--Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).
-+[show [<resource id>] | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR]
-+Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified, all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).  If \fB\-\-hide\-inactive\fR is specified, only show active resources.
- .TP
- list [<standard|provider|type>] [\fB\-\-nodesc\fR]
- Show list of all available resources, optionally filtered by specified type, standard or provider. If \fB\-\-nodesc\fR is used then descriptions of resources are not printed.
-@@ -627,11 +627,11 @@ stop
- Stop booth arbitrator service.
- .SS "status"
- .TP
--[status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
--View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide-inactive\fR hides inactive resources).
-+[status] [\fB\-\-full\fR | \fB\-\-hide\-inactive\fR]
-+View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources).
- .TP
--resources
--View current status of cluster resources.
-+resources [<resource id> | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR]
-+Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified, all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).  If \fB\-\-hide\-inactive\fR is specified, only show active resources.
- .TP
- groups
- View currently configured groups and their resources.
-diff --git a/pcs/resource.py b/pcs/resource.py
-index 66c743c..74adac6 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -1993,6 +1993,17 @@ def resource_group_list(argv):
-         print(" ".join(line_parts))
- 
- def resource_show(argv, stonith=False):
-+    mutually_exclusive_opts = ("--full", "--groups", "--hide-inactive")
-+    modifiers = [
-+        key for key in utils.pcs_options if key in mutually_exclusive_opts
-+    ]
-+    if (len(modifiers) > 1) or (argv and modifiers):
-+        utils.err(
-+            "you can specify only one of resource id, {0}".format(
-+                ", ".join(mutually_exclusive_opts)
-+            )
-+        )
-+
-     if "--groups" in utils.pcs_options:
-         resource_group_list(argv)
-         return
-@@ -2009,15 +2020,28 @@ def resource_show(argv, stonith=False):
-         return
- 
-     if len(argv) == 0:
--        output, retval = utils.run(["crm_mon", "-1", "-r"])
-+        monitor_command = ["crm_mon", "--one-shot"]
-+        if "--hide-inactive" not in utils.pcs_options:
-+            monitor_command.append('--inactive')
-+        output, retval = utils.run(monitor_command)
-         if retval != 0:
-             utils.err("unable to get cluster status from crm_mon\n"+output.rstrip())
-         preg = re.compile(r'.*(stonith:.*)')
-         resources_header = False
-         in_resources = False
-         has_resources = False
-+        no_resources_line = (
-+            "NO stonith devices configured" if stonith
-+            else "NO resources configured"
-+        )
-         for line in output.split('\n'):
--            if line == "Full list of resources:":
-+            if line == "No active resources":
-+                print(line)
-+                return
-+            if line == "No resources":
-+                print(no_resources_line)
-+                return
-+            if line in ("Full list of resources:", "Active resources:"):
-                 resources_header = True
-                 continue
-             if line == "":
-@@ -2026,10 +2050,7 @@ def resource_show(argv, stonith=False):
-                     in_resources = True
-                 elif in_resources:
-                     if not has_resources:
--                        if not stonith:
--                            print("NO resources configured")
--                        else:
--                            print("NO stonith devices configured")
-+                        print(no_resources_line)
-                     return
-                 continue
-             if in_resources:
-diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
-index 2fa5088..614b895 100644
---- a/pcs/test/test_resource.py
-+++ b/pcs/test/test_resource.py
-@@ -213,8 +213,7 @@ the health of a system via IPMI.
-  ClusterIP7\t(ocf::heartbeat:IPaddr2):\tStopped (disabled)
- """)
- 
--        output, returnVal = pcs(temp_cib, "resource show ClusterIP6 --full")
--        assert returnVal == 0
-+        output, returnVal = pcs(temp_cib, "resource show --full")
-         ac(output, """\
-  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
-   Attributes: ip=192.168.0.99 cidr_netmask=32
-@@ -241,6 +240,7 @@ the health of a system via IPMI.
-   Meta Attrs: target-role=Stopped 
-   Operations: monitor interval=30s (ClusterIP7-monitor-interval-30s)
- """)
-+        self.assertEqual(0, returnVal)
- 
-         output, returnVal = pcs(
-             temp_cib,
-@@ -785,7 +785,7 @@ monitor interval=60s (state-monitor-interval-60s)
-         assert returnVal == 0
-         assert output == ""
- 
--        line = 'resource show ClusterIP --full'
-+        line = 'resource show ClusterIP'
-         output, returnVal = pcs(temp_cib, line)
-         ac(output, """\
-  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
-@@ -3463,16 +3463,23 @@ Error: Cannot remove more than one resource from cloned group
-         ac(o,"")
-         assert r == 0
- 
--        o,r = pcs(temp_cib, "resource show D1 --full")
--        ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n  Meta Attrs: target-role=Stopped \n  Operations: monitor interval=60s (D1-monitor-interval-60s)\n")
-+        o,r = pcs(temp_cib, "resource show D1")
-+        ac(o, """\
-+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
-+  Meta Attrs: target-role=Stopped 
-+  Operations: monitor interval=60s (D1-monitor-interval-60s)
-+""")
-         assert r == 0
- 
-         o,r = pcs(temp_cib, "resource enable D1")
-         ac(o,"")
-         assert r == 0
- 
--        o,r = pcs(temp_cib, "resource show D1 --full")
--        ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D1-monitor-interval-60s)\n")
-+        o,r = pcs(temp_cib, "resource show D1")
-+        ac(o, """\
-+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
-+  Operations: monitor interval=60s (D1-monitor-interval-60s)
-+""")
-         assert r == 0
- 
-         # bad resource name
-diff --git a/pcs/usage.py b/pcs/usage.py
-index baa70d0..b11a5fa 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -189,12 +189,12 @@ Usage: pcs resource [commands]...
- Manage pacemaker resources
- 
- Commands:
--    [show [resource id]] [--full] [--groups]
-+    [show [<resource id>] | --full | --groups | --hide-inactive]
-         Show all currently configured resources or if a resource is specified
--        show the options for the configured resource.  If --full is specified
-+        show the options for the configured resource.  If --full is specified,
-         all configured resource options will be displayed.  If --groups is
--        specified, only show groups (and their resources).
--
-+        specified, only show groups (and their resources).  If --hide-inactive
-+        is specified, only show active resources.
- 
-     list [<standard|provider|type>] [--nodesc]
-         Show list of all available resources, optionally filtered by specified
-@@ -1108,8 +1108,12 @@ Commands:
-         View all information about the cluster and resources (--full provides
-         more details, --hide-inactive hides inactive resources).
- 
--    resources
--        View current status of cluster resources.
-+    resources [<resource id> | --full | --groups | --hide-inactive]
-+        Show all currently configured resources or if a resource is specified
-+        show the options for the configured resource.  If --full is specified,
-+        all configured resource options will be displayed.  If --groups is
-+        specified, only show groups (and their resources).  If --hide-inactive
-+        is specified, only show active resources.
- 
-     groups
-         View currently configured groups and their resources.
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1301993-01-improve-node-properties-commands.patch b/SOURCES/bz1301993-01-improve-node-properties-commands.patch
deleted file mode 100644
index 5f812e7..0000000
--- a/SOURCES/bz1301993-01-improve-node-properties-commands.patch
+++ /dev/null
@@ -1,570 +0,0 @@
-From b221d83628cf1413abfa5d836c103a94184b3c46 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 22 Jul 2016 12:06:24 +0200
-Subject: [PATCH] improve node properties commands
-
-* added "pcs node attribute" command
-* allow to list value of specified attribute / utilization from all nodes
----
- pcs/node.py           |  64 +++++++++---
- pcs/pcs.8             |   7 +-
- pcs/prop.py           |  13 ++-
- pcs/test/test_node.py | 278 ++++++++++++++++++++++++++++++++++++++++++++++++--
- pcs/usage.py          |  21 ++--
- pcs/utils.py          |  13 ++-
- 6 files changed, 355 insertions(+), 41 deletions(-)
-
-diff --git a/pcs/node.py b/pcs/node.py
-index ac154d4..be2fb13 100644
---- a/pcs/node.py
-+++ b/pcs/node.py
-@@ -12,6 +12,8 @@ from pcs import (
-     usage,
-     utils,
- )
-+from pcs.cli.common.errors import CmdLineInputError
-+from pcs.cli.common.parse_args import prepare_options
- from pcs.lib.errors import LibraryError
- import pcs.lib.pacemaker as lib_pacemaker
- from pcs.lib.pacemaker_values import get_valid_timeout_seconds
-@@ -33,11 +35,26 @@ def node_cmd(argv):
-         node_standby(argv)
-     elif sub_cmd == "unstandby":
-         node_standby(argv, False)
-+    elif sub_cmd == "attribute":
-+        if "--name" in utils.pcs_options and len(argv) > 1:
-+            usage.node("attribute")
-+            sys.exit(1)
-+        filter_attr=utils.pcs_options.get("--name", None)
-+        if len(argv) == 0:
-+            attribute_show_cmd(filter_attr=filter_attr)
-+        elif len(argv) == 1:
-+            attribute_show_cmd(argv.pop(0), filter_attr=filter_attr)
-+        else:
-+            attribute_set_cmd(argv.pop(0), argv)
-     elif sub_cmd == "utilization":
-+        if "--name" in utils.pcs_options and len(argv) > 1:
-+            usage.node("utilization")
-+            sys.exit(1)
-+        filter_name=utils.pcs_options.get("--name", None)
-         if len(argv) == 0:
--            print_nodes_utilization()
-+            print_node_utilization(filter_name=filter_name)
-         elif len(argv) == 1:
--            print_node_utilization(argv.pop(0))
-+            print_node_utilization(argv.pop(0), filter_name=filter_name)
-         else:
-             set_node_utilization(argv.pop(0), argv)
-     # pcs-to-pcsd use only
-@@ -135,23 +152,16 @@ def set_node_utilization(node, argv):
-     )
-     utils.replace_cib_configuration(cib)
- 
--def print_node_utilization(node):
--    cib = utils.get_cib_dom()
--    node_el = utils.dom_get_node(cib, node)
--    if node_el is None:
--        utils.err("Unable to find a node: {0}".format(node))
--    utilization = utils.get_utilization_str(node_el)
--
--    print("Node Utilization:")
--    print(" {0}: {1}".format(node, utilization))
--
--def print_nodes_utilization():
-+def print_node_utilization(filter_node=None, filter_name=None):
-     cib = utils.get_cib_dom()
-     utilization = {}
-     for node_el in cib.getElementsByTagName("node"):
--        u = utils.get_utilization_str(node_el)
-+        node = node_el.getAttribute("uname")
-+        if filter_node is not None and node != filter_node:
-+            continue
-+        u = utils.get_utilization_str(node_el, filter_name)
-         if u:
--            utilization[node_el.getAttribute("uname")] = u
-+            utilization[node] = u
-     print("Node Utilization:")
-     for node in sorted(utilization):
-         print(" {0}: {1}".format(node, utilization[node]))
-@@ -163,3 +173,27 @@ def node_pacemaker_status():
-         ))
-     except LibraryError as e:
-         utils.process_library_reports(e.args)
-+
-+def attribute_show_cmd(filter_node=None, filter_attr=None):
-+    node_attributes = utils.get_node_attributes(
-+        filter_node=filter_node,
-+        filter_attr=filter_attr
-+    )
-+    print("Node Attributes:")
-+    attribute_print(node_attributes)
-+
-+def attribute_set_cmd(node, argv):
-+    try:
-+        attrs = prepare_options(argv)
-+    except CmdLineInputError as e:
-+        utils.exit_on_cmdline_input_errror(e, "node", "attribute")
-+    for name, value in attrs.items():
-+        utils.set_node_attribute(name, value, node)
-+
-+def attribute_print(node_attributes):
-+    for node in sorted(node_attributes.keys()):
-+        line_parts = [" " + node + ":"]
-+        for name, value in sorted(node_attributes[node].items()):
-+            line_parts.append("{0}={1}".format(name, value))
-+        print(" ".join(line_parts))
-+
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 16c9331..f789df7 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -644,6 +644,9 @@ clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR]
- Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa.  After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth').  Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance.
- .SS "node"
- .TP
-+attribute [[<node>] [\fB\-\-name\fR <attr>] | <node> <name>=<value> ...]
-+Manage node attributes.  If no parameters are specified, show attributes of all nodes.  If one parameter is specified, show attributes of specified node.  If \fB\-\-name\fR is specified, show specified attribute's value from all nodes.  If more parameters are specified, set attributes of specified node.  Attributes can be removed by setting an attribute without a value.
-+.TP
- maintenance [\fB\-\-all\fR] | [<node>]...
- Put specified node(s) into maintenance mode, if no node or options are specified the current node will be put into maintenance mode, if \fB\-\-all\fR is specified all nodes will be put into maintenace mode.
- .TP
-@@ -656,8 +659,8 @@ Put specified node into standby mode (the node specified will no longer be able
- unstandby [\fB\-\-all\fR | <node>] [\fB\-\-wait\fR[=n]]
- Remove node from standby mode (the node specified will now be able to host resources), if no node or options are specified the current node will be removed from standby mode, if \fB\-\-all\fR is specified all nodes will be removed from standby mode.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be removed from standby mode and then return 0 on success or 1 if the operation not succeeded yet.  If 'n' is not specified it defaults to 60 minutes.
- .TP
--utilization [<node> [<name>=<value> ...]]
--Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram=
-+utilization [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...]
-+Add specified utilization options to specified node.  If node is not specified, shows utilization of all nodes.  If \fB\-\-name\fR is specified, shows specified utilization value from all nodes. If utilization options are not specified, shows utilization of specified node.  Utilization option should be in format name=value, value has to be integer.  Options may be removed by setting an option without a value.  Example: pcs node utilization node1 cpu=4 ram=
- .SS "alert"
- .TP
- [config|show]
-diff --git a/pcs/prop.py b/pcs/prop.py
-index 92a953c..1089865 100644
---- a/pcs/prop.py
-+++ b/pcs/prop.py
-@@ -8,8 +8,11 @@ from __future__ import (
- import sys
- import json
- 
--from pcs import usage
--from pcs import utils
-+from pcs import (
-+    node,
-+    usage,
-+    utils,
-+)
- 
- def property_cmd(argv):
-     if len(argv) == 0:
-@@ -127,11 +130,7 @@ def list_property(argv):
-     )
-     if node_attributes:
-         print("Node Attributes:")
--        for node in sorted(node_attributes.keys()):
--            line_parts = [" " + node + ":"]
--            for name, value in sorted(node_attributes[node].items()):
--                line_parts.append("{0}={1}".format(name, value))
--            print(" ".join(line_parts))
-+        node.attribute_print(node_attributes)
- 
- def get_default_properties():
-     parameters = {}
-diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
-index 023148c..6f03112 100644
---- a/pcs/test/test_node.py
-+++ b/pcs/test/test_node.py
-@@ -8,11 +8,17 @@ from __future__ import (
- import shutil
- import unittest
- 
-+from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-     ac,
-     get_test_resource as rc,
- )
--from pcs.test.tools.pcs_runner import pcs
-+from pcs.test.tools.pcs_runner import (
-+    pcs,
-+    PcsRunner,
-+)
-+
-+from pcs import utils
- 
- empty_cib = rc("cib-empty-withnodes.xml")
- temp_cib = rc("temp-cib.xml")
-@@ -182,7 +188,7 @@ Cluster Properties:
-         output, returnVal = pcs(temp_cib, "node utilization rh7-2")
-         expected_out = """\
- Node Utilization:
-- rh7-2: \n"""
-+"""
-         ac(expected_out, output)
-         self.assertEqual(0, returnVal)
- 
-@@ -229,14 +235,33 @@ Node Utilization:
-         ac(expected_out, output)
-         self.assertEqual(0, returnVal)
- 
--    def test_node_utilization_set_invalid(self):
--        output, returnVal = pcs(temp_cib, "node utilization rh7-0")
-+        output, returnVal = pcs(
-+            temp_cib, "node utilization rh7-2 test1=-20"
-+        )
-+        ac("", output)
-+        self.assertEqual(0, returnVal)
-+
-+        output, returnVal = pcs(temp_cib, "node utilization --name test1")
-         expected_out = """\
--Error: Unable to find a node: rh7-0
-+Node Utilization:
-+ rh7-1: test1=-10
-+ rh7-2: test1=-20
- """
-         ac(expected_out, output)
--        self.assertEqual(1, returnVal)
-+        self.assertEqual(0, returnVal)
- 
-+        output, returnVal = pcs(
-+            temp_cib,
-+            "node utilization --name test1 rh7-2"
-+        )
-+        expected_out = """\
-+Node Utilization:
-+ rh7-2: test1=-20
-+"""
-+        ac(expected_out, output)
-+        self.assertEqual(0, returnVal)
-+
-+    def test_node_utilization_set_invalid(self):
-         output, returnVal = pcs(temp_cib, "node utilization rh7-0 test=10")
-         expected_out = """\
- Error: Unable to find a node: rh7-0
-@@ -252,3 +277,244 @@ Error: Value of utilization attribute must be integer: 'test=int'
- """
-         ac(expected_out, output)
-         self.assertEqual(1, returnVal)
-+
-+
-+class NodeAttributeTest(unittest.TestCase, AssertPcsMixin):
-+    def setUp(self):
-+        shutil.copy(empty_cib, temp_cib)
-+        self.pcs_runner = PcsRunner(temp_cib)
-+
-+    def fixture_attrs(self, nodes, attrs=None):
-+        attrs = dict() if attrs is None else attrs
-+        xml_lines = ['<nodes>']
-+        for node_id, node_name in enumerate(nodes, 1):
-+            xml_lines.extend([
-+                '<node id="{0}" uname="{1}">'.format(node_id, node_name),
-+                '<instance_attributes id="nodes-{0}">'.format(node_id),
-+            ])
-+            nv = '<nvpair id="nodes-{id}-{name}" name="{name}" value="{val}"/>'
-+            for name, value in attrs.get(node_name, dict()).items():
-+                xml_lines.append(nv.format(id=node_id, name=name, val=value))
-+            xml_lines.extend([
-+                '</instance_attributes>',
-+                '</node>'
-+            ])
-+        xml_lines.append('</nodes>')
-+
-+        utils.usefile = True
-+        utils.filename = temp_cib
-+        output, retval = utils.run([
-+            "cibadmin", "--modify", '--xml-text', "\n".join(xml_lines)
-+        ])
-+        assert output == ""
-+        assert retval == 0
-+
-+    def test_show_empty(self):
-+        self.fixture_attrs(["rh7-1", "rh7-2"])
-+        self.assert_pcs_success(
-+            "node attribute",
-+            "Node Attributes:\n"
-+        )
-+
-+    def test_show_nonempty(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute",
-+            """\
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+ rh7-2: IP=192.168.1.2
-+"""
-+        )
-+
-+    def test_show_multiple_per_node(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute",
-+            """\
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1 alias=node1
-+ rh7-2: IP=192.168.1.2 alias=node2
-+"""
-+        )
-+
-+    def test_show_one_node(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute rh7-1",
-+            """\
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1 alias=node1
-+"""
-+        )
-+
-+    def test_show_missing_node(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute rh7-3",
-+            """\
-+Node Attributes:
-+"""
-+        )
-+
-+    def test_show_name(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute --name alias",
-+            """\
-+Node Attributes:
-+ rh7-1: alias=node1
-+ rh7-2: alias=node2
-+"""
-+        )
-+
-+    def test_show_missing_name(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute --name missing",
-+            """\
-+Node Attributes:
-+"""
-+        )
-+
-+    def test_show_node_and_name(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute --name alias rh7-1",
-+            """\
-+Node Attributes:
-+ rh7-1: alias=node1
-+"""
-+        )
-+
-+    def test_set_new(self):
-+        self.fixture_attrs(["rh7-1", "rh7-2"])
-+        self.assert_pcs_success(
-+            "node attribute rh7-1 IP=192.168.1.1"
-+        )
-+        self.assert_pcs_success(
-+            "node attribute",
-+            """\
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "node attribute rh7-2 IP=192.168.1.2"
-+        )
-+        self.assert_pcs_success(
-+            "node attribute",
-+            """\
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+ rh7-2: IP=192.168.1.2
-+"""
-+        )
-+
-+    def test_set_existing(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute rh7-2 IP=192.168.2.2"
-+        )
-+        self.assert_pcs_success(
-+            "node attribute",
-+            """\
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+ rh7-2: IP=192.168.2.2
-+"""
-+        )
-+
-+    def test_unset(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute rh7-2 IP="
-+        )
-+        self.assert_pcs_success(
-+            "node attribute",
-+            """\
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+"""
-+        )
-+
-+    def test_unset_nonexisting(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_result(
-+            "node attribute rh7-1 missing=",
-+            "Error: attribute: 'missing' doesn't exist for node: 'rh7-1'\n",
-+            returncode=2
-+        )
-+
-+    def test_unset_nonexisting_forced(self):
-+        self.fixture_attrs(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "node attribute rh7-1 missing= --force",
-+            ""
-+        )
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 0474324..2f8f855 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1242,6 +1242,14 @@ Usage: pcs node <command>
- Manage cluster nodes
- 
- Commands:
-+    attribute [[<node>] [--name <name>] | <node> <name>=<value> ...]
-+        Manage node attributes.  If no parameters are specified, show attributes
-+        of all nodes.  If one parameter is specified, show attributes
-+        of specified node.  If --name is specified, show specified attribute's
-+        value from all nodes.  If more parameters are specified, set attributes
-+        of specified node.  Attributes can be removed by setting an attribute
-+        without a value.
-+
-     maintenance [--all] | [<node>]...
-         Put specified node(s) into maintenance mode, if no node or options are
-         specified the current node will be put into maintenance mode, if --all
-@@ -1272,12 +1280,13 @@ Commands:
-         the operation not succeeded yet.  If 'n' is not specified it defaults
-         to 60 minutes.
- 
--    utilization [<node> [<name>=<value> ...]]
--        Add specified utilization options to specified node. If node is not
--        specified, shows utilization of all nodes. If utilization options are
--        not specified, shows utilization of specified node. Utilization option
--        should be in format name=value, value has to be integer. Options may be
--        removed by setting an option without a value.
-+    utilization [[<node>] [--name <name>] | <node> <name>=<value> ...]
-+        Add specified utilization options to specified node.  If node is not
-+        specified, shows utilization of all nodes.  If --name is specified,
-+        shows specified utilization value from all nodes. If utilization options
-+        are not specified, shows utilization of specified node.  Utilization
-+        option should be in format name=value, value has to be integer.  Options
-+        may be removed by setting an option without a value.
-         Example: pcs node utilization node1 cpu=4 ram=
- """
-     if pout:
-diff --git a/pcs/utils.py b/pcs/utils.py
-index c7d1759..079d916 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1677,6 +1677,8 @@ def get_node_attributes(filter_node=None, filter_attr=None):
-                 if nodename not in nas:
-                     nas[nodename] = dict()
-                 nas[nodename][attr_name] = nvp.getAttribute("value")
-+            # Use just first element of attributes. We don't support
-+            # attributes with rules just yet.
-             break
-     return nas
- 
-@@ -2447,21 +2449,22 @@ def dom_update_meta_attr(dom_element, attributes):
-             meta_attributes.getAttribute("id") + "-"
-         )
- 
--def get_utilization(element):
-+def get_utilization(element, filter_name=None):
-     utilization = {}
-     for e in element.getElementsByTagName("utilization"):
-         for u in e.getElementsByTagName("nvpair"):
-             name = u.getAttribute("name")
--            value = u.getAttribute("value") if u.hasAttribute("value") else ""
--            utilization[name] = value
-+            if filter_name is not None and name != filter_name:
-+                continue
-+            utilization[name] = u.getAttribute("value")
-         # Use just first element of utilization attributes. We don't support
-         # utilization with rules just yet.
-         break
-     return utilization
- 
--def get_utilization_str(element):
-+def get_utilization_str(element, filter_name=None):
-     output = []
--    for name, value in sorted(get_utilization(element).items()):
-+    for name, value in sorted(get_utilization(element, filter_name).items()):
-         output.append(name + "=" + value)
-     return " ".join(output)
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch b/SOURCES/bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch
deleted file mode 100644
index c2066bf..0000000
--- a/SOURCES/bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch
+++ /dev/null
@@ -1,371 +0,0 @@
-From 5921099626e3afde044027ed493bdee905db4415 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Thu, 21 Jul 2016 13:58:41 +0200
-Subject: [PATCH] fix filter by property name in "pcs property show"
-
----
- pcs/prop.py                 |  14 +--
- pcs/test/test_properties.py | 263 ++++++++++++++++++++++++++++++++++----------
- pcs/utils.py                |  13 ++-
- 3 files changed, 223 insertions(+), 67 deletions(-)
-
-diff --git a/pcs/prop.py b/pcs/prop.py
-index 36eba60..92a953c 100644
---- a/pcs/prop.py
-+++ b/pcs/prop.py
-@@ -100,9 +100,7 @@ def unset_property(argv):
-         utils.replace_cib_configuration(cib_dom)
- 
- def list_property(argv):
--    print_all = False
--    if len(argv) == 0:
--        print_all = True
-+    print_all = len(argv) == 0
- 
-     if "--all" in utils.pcs_options and "--defaults" in utils.pcs_options:
-         utils.err("you cannot specify both --all and --defaults")
-@@ -124,13 +122,15 @@ def list_property(argv):
-     for prop,val in sorted(properties.items()):
-         print(" " + prop + ": " + val)
- 
--    node_attributes = utils.get_node_attributes()
-+    node_attributes = utils.get_node_attributes(
-+        filter_attr=(None if print_all else argv[0])
-+    )
-     if node_attributes:
-         print("Node Attributes:")
--        for node in sorted(node_attributes):
-+        for node in sorted(node_attributes.keys()):
-             line_parts = [" " + node + ":"]
--            for attr in node_attributes[node]:
--                line_parts.append(attr)
-+            for name, value in sorted(node_attributes[node].items()):
-+                line_parts.append("{0}={1}".format(name, value))
-             print(" ".join(line_parts))
- 
- def get_default_properties():
-diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py
-index 6cdd2e5..fbaf880 100644
---- a/pcs/test/test_properties.py
-+++ b/pcs/test/test_properties.py
-@@ -8,11 +8,15 @@ from __future__ import (
- import shutil
- import unittest
- 
-+from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-     ac,
-     get_test_resource as rc,
- )
--from pcs.test.tools.pcs_runner import pcs
-+from pcs.test.tools.pcs_runner import (
-+    pcs,
-+    PcsRunner,
-+)
- 
- from pcs import utils
- 
-@@ -66,61 +70,6 @@ class PropertyTest(unittest.TestCase):
-         assert "stonith-enabled: false" in output
-         assert output.startswith('Cluster Properties:\n batch-limit')
- 
--    def testNodeProperties(self):
--        utils.usefile = True
--        utils.filename = temp_cib
--        o,r = utils.run(["cibadmin","-M", '--xml-text', '<nodes><node id="1" uname="rh7-1"><instance_attributes id="nodes-1"/></node><node id="2" uname="rh7-2"><instance_attributes id="nodes-2"/></node></nodes>'])
--        ac(o,"")
--        assert r == 0
--
--        o,r = pcs("property set --node=rh7-1 IP=192.168.1.1")
--        ac(o,"")
--        assert r==0
--
--        o,r = pcs("property set --node=rh7-2 IP=192.168.2.2")
--        ac(o,"")
--        assert r==0
--
--        o,r = pcs("property")
--        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n rh7-2: IP=192.168.2.2\n")
--        assert r==0
--
--        o,r = pcs("property set --node=rh7-2 IP=")
--        ac(o,"")
--        assert r==0
--
--        o,r = pcs("property")
--        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n")
--        assert r==0
--
--        o,r = pcs("property set --node=rh7-1 IP=192.168.1.1")
--        ac(o,"")
--        assert r==0
--
--        o,r = pcs("property set --node=rh7-2 IP=192.168.2.2")
--        ac(o,"")
--        assert r==0
--
--        o,r = pcs("property")
--        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n rh7-2: IP=192.168.2.2\n")
--        assert r==0
--
--        o,r = pcs("property unset --node=rh7-1 IP")
--        ac(o,"")
--        assert r==0
--
--        o,r = pcs("property")
--        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-2: IP=192.168.2.2\n")
--        assert r==0
--
--        o,r = pcs("property unset --node=rh7-1 IP")
--        ac(o,"Error: attribute: 'IP' doesn't exist for node: 'rh7-1'\n")
--        assert r==2
--
--        o,r = pcs("property unset --node=rh7-1 IP --force")
--        ac(o,"")
--        assert r==0
--
-     def testBadProperties(self):
-         o,r = pcs(temp_cib, "property set xxxx=zzzz")
-         self.assertEqual(r, 1)
-@@ -329,3 +278,205 @@ class PropertyTest(unittest.TestCase):
-  default-resource-stickiness: 0.1
- """
-         )
-+
-+
-+class NodePropertyTestBase(unittest.TestCase, AssertPcsMixin):
-+    def setUp(self):
-+        shutil.copy(empty_cib, temp_cib)
-+        self.pcs_runner = PcsRunner(temp_cib)
-+
-+    def fixture_nodes(self, nodes, attrs=None):
-+        attrs = dict() if attrs is None else attrs
-+        xml_lines = ['<nodes>']
-+        for node_id, node_name in enumerate(nodes, 1):
-+            xml_lines.extend([
-+                '<node id="{0}" uname="{1}">'.format(node_id, node_name),
-+                '<instance_attributes id="nodes-{0}">'.format(node_id),
-+            ])
-+            nv = '<nvpair id="nodes-{id}-{name}" name="{name}" value="{val}"/>'
-+            for name, value in attrs.get(node_name, dict()).items():
-+                xml_lines.append(nv.format(id=node_id, name=name, val=value))
-+            xml_lines.extend([
-+                '</instance_attributes>',
-+                '</node>'
-+            ])
-+        xml_lines.append('</nodes>')
-+
-+        utils.usefile = True
-+        utils.filename = temp_cib
-+        output, retval = utils.run([
-+            "cibadmin", "--modify", '--xml-text', "\n".join(xml_lines)
-+        ])
-+        assert output == ""
-+        assert retval == 0
-+
-+class NodePropertyShowTest(NodePropertyTestBase):
-+    def test_empty(self):
-+        self.fixture_nodes(["rh7-1", "rh7-2"])
-+        self.assert_pcs_success(
-+            "property",
-+            "Cluster Properties:\n"
-+        )
-+
-+    def test_nonempty(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "property",
-+            """\
-+Cluster Properties:
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+ rh7-2: IP=192.168.1.2
-+"""
-+        )
-+
-+    def test_multiple_per_node(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "property",
-+            """\
-+Cluster Properties:
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1 alias=node1
-+ rh7-2: IP=192.168.1.2 alias=node2
-+"""
-+        )
-+
-+    def test_name_filter_not_exists(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "property show alias",
-+            """\
-+Cluster Properties:
-+"""
-+        )
-+
-+    def test_name_filter_exists(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "property show alias",
-+            """\
-+Cluster Properties:
-+Node Attributes:
-+ rh7-1: alias=node1
-+"""
-+        )
-+
-+class NodePropertySetTest(NodePropertyTestBase):
-+    def test_set_new(self):
-+        self.fixture_nodes(["rh7-1", "rh7-2"])
-+        self.assert_pcs_success(
-+            "property set --node=rh7-1 IP=192.168.1.1"
-+        )
-+        self.assert_pcs_success(
-+            "property",
-+            """\
-+Cluster Properties:
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "property set --node=rh7-2 IP=192.168.1.2"
-+        )
-+        self.assert_pcs_success(
-+            "property",
-+            """\
-+Cluster Properties:
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+ rh7-2: IP=192.168.1.2
-+"""
-+        )
-+
-+    def test_set_existing(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "property set --node=rh7-2 IP=192.168.2.2"
-+        )
-+        self.assert_pcs_success(
-+            "property",
-+            """\
-+Cluster Properties:
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+ rh7-2: IP=192.168.2.2
-+"""
-+        )
-+
-+    def test_unset(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "property set --node=rh7-2 IP="
-+        )
-+        self.assert_pcs_success(
-+            "property",
-+            """\
-+Cluster Properties:
-+Node Attributes:
-+ rh7-1: IP=192.168.1.1
-+"""
-+        )
-+
-+    def test_unset_nonexisting(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_result(
-+            "property unset --node=rh7-1 missing",
-+            "Error: attribute: 'missing' doesn't exist for node: 'rh7-1'\n",
-+            returncode=2
-+        )
-+
-+    def test_unset_nonexisting_forced(self):
-+        self.fixture_nodes(
-+            ["rh7-1", "rh7-2"],
-+            {
-+                "rh7-1": {"IP": "192.168.1.1", },
-+                "rh7-2": {"IP": "192.168.1.2", },
-+            }
-+        )
-+        self.assert_pcs_success(
-+            "property unset --node=rh7-1 missing --force",
-+            ""
-+        )
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 981a186..c7d1759 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1659,19 +1659,24 @@ def set_unmanaged(resource):
-             "is-managed", "--meta", "--parameter-value", "false"]
-     return run(args)
- 
--def get_node_attributes():
-+def get_node_attributes(filter_node=None, filter_attr=None):
-     node_config = get_cib_xpath("//nodes")
--    nas = {}
-     if (node_config == ""):
-         err("unable to get crm_config, is pacemaker running?")
-     dom = parseString(node_config).documentElement
-+    nas = dict()
-     for node in dom.getElementsByTagName("node"):
-         nodename = node.getAttribute("uname")
-+        if filter_node is not None and nodename != filter_node:
-+            continue
-         for attributes in node.getElementsByTagName("instance_attributes"):
-             for nvp in attributes.getElementsByTagName("nvpair"):
-+                attr_name = nvp.getAttribute("name")
-+                if filter_attr is not None and attr_name != filter_attr:
-+                    continue
-                 if nodename not in nas:
--                    nas[nodename] = []
--                nas[nodename].append(nvp.getAttribute("name") + "=" + nvp.getAttribute("value"))
-+                    nas[nodename] = dict()
-+                nas[nodename][attr_name] = nvp.getAttribute("value")
-             break
-     return nas
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1303136-01-fix-check-if-id-exists-in-cib.patch b/SOURCES/bz1303136-01-fix-check-if-id-exists-in-cib.patch
deleted file mode 100644
index bc71bfc..0000000
--- a/SOURCES/bz1303136-01-fix-check-if-id-exists-in-cib.patch
+++ /dev/null
@@ -1,116 +0,0 @@
-From cb2347ad79fe30076fad1579d1f5ee27a1835963 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 22 Jul 2016 16:29:04 +0200
-Subject: [PATCH] fix check if id exists in cib
-
----
- pcs/lib/cib/tools.py           | 11 ++++++++++-
- pcs/test/test_lib_cib_tools.py | 24 ++++++++++++++++++++++++
- pcs/utils.py                   | 30 +++++++++++++++++++++++++-----
- 3 files changed, 59 insertions(+), 6 deletions(-)
-
-diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
-index b59d50d..f86b63b 100644
---- a/pcs/lib/cib/tools.py
-+++ b/pcs/lib/cib/tools.py
-@@ -21,7 +21,16 @@ def does_id_exist(tree, check_id):
-     tree cib etree node
-     check_id id to check
-     """
--    return tree.find('.//*[@id="{0}"]'.format(check_id)) is not None
-+    # ElementTree has getroot, Elemet has getroottree
-+    root = tree.getroot() if hasattr(tree, "getroot") else tree.getroottree()
-+    # do not search in /cib/status, it may contain references to previously
-+    # existing and deleted resources and thus preventing creating them again
-+    existing = root.xpath(
-+        '(/cib/*[name()!="status"]|/*[name()!="cib"])//*[@id="{0}"]'.format(
-+            check_id
-+        )
-+    )
-+    return len(existing) > 0
- 
- def validate_id_does_not_exist(tree, id):
-     """
-diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
-index 1149a3f..e1f2313 100644
---- a/pcs/test/test_lib_cib_tools.py
-+++ b/pcs/test/test_lib_cib_tools.py
-@@ -48,6 +48,30 @@ class DoesIdExistTest(CibToolsTest):
-         self.assertFalse(lib.does_id_exist(self.cib.tree, "myId "))
-         self.assertFalse(lib.does_id_exist(self.cib.tree, "my Id"))
- 
-+    def test_ignore_status_section(self):
-+        self.cib.append_to_first_tag_name(
-+            "status",
-+            """\
-+<elem1 id="status-1">
-+    <elem1a id="status-1a">
-+        <elem1aa id="status-1aa"/>
-+        <elem1ab id="status-1ab"/>
-+    </elem1a>
-+    <elem1b id="status-1b">
-+        <elem1ba id="status-1ba"/>
-+        <elem1bb id="status-1bb"/>
-+    </elem1b>
-+</elem1>
-+"""
-+        )
-+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1"))
-+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1a"))
-+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1aa"))
-+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1ab"))
-+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1b"))
-+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1ba"))
-+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1bb"))
-+
- class FindUniqueIdTest(CibToolsTest):
-     def test_already_unique(self):
-         self.fixture_add_primitive_with_id("myId")
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 079d916..a7ed975 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1589,15 +1589,35 @@ def is_valid_cib_scope(scope):
- # Checks to see if id exists in the xml dom passed
- # DEPRECATED use lxml version available in pcs.lib.cib.tools
- def does_id_exist(dom, check_id):
-+    # do not search in /cib/status, it may contain references to previously
-+    # existing and deleted resources and thus preventing creating them again
-     if is_etree(dom):
--        for elem in dom.findall(str(".//*")):
-+        for elem in dom.findall(str(
-+            '(/cib/*[name()!="status"]|/*[name()!="cib"])/*'
-+        )):
-             if elem.get("id") == check_id:
-                 return True
-     else:
--        all_elem = dom.getElementsByTagName("*")
--        for elem in all_elem:
--            if elem.getAttribute("id") == check_id:
--                return True
-+        document = (
-+            dom
-+            if isinstance(dom, xml.dom.minidom.Document)
-+            else dom.ownerDocument
-+        )
-+        cib_found = False
-+        for cib in dom_get_children_by_tag_name(document, "cib"):
-+            cib_found = True
-+            for section in cib.childNodes:
-+                if section.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
-+                    continue
-+                if section.tagName == "status":
-+                    continue
-+                for elem in section.getElementsByTagName("*"):
-+                    if elem.getAttribute("id") == check_id:
-+                        return True
-+        if not cib_found:
-+            for elem in document.getElementsByTagName("*"):
-+                if elem.getAttribute("id") == check_id:
-+                    return True
-     return False
- 
- # Returns check_id if it doesn't exist in the dom, otherwise it adds an integer
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1305049-01-pcs-does-not-support-ticket-constraints.patch b/SOURCES/bz1305049-01-pcs-does-not-support-ticket-constraints.patch
deleted file mode 100644
index 38e1b01..0000000
--- a/SOURCES/bz1305049-01-pcs-does-not-support-ticket-constraints.patch
+++ /dev/null
@@ -1,351 +0,0 @@
-From be8876832da345e7e16827bd6c50e262380d6979 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 14 Sep 2016 09:04:57 +0200
-Subject: [PATCH] squash bz1305049 pcs does not support "ticket" con
-
-d147ba4a51d0 do not use suffix "no-role" in ticket constraints
-
-066cf217ec45 add constraint ticket remove command
----
- pcs/cli/common/lib_wrapper.py                  |  1 +
- pcs/cli/constraint_ticket/command.py           |  6 ++
- pcs/cli/constraint_ticket/test/test_command.py | 22 +++++++
- pcs/constraint.py                              |  1 +
- pcs/lib/cib/constraint/ticket.py               | 29 ++++++++-
- pcs/lib/cib/test/test_constraint_ticket.py     | 89 +++++++++++++++++++++++++-
- pcs/lib/commands/constraint/ticket.py          | 12 ++++
- pcs/pcs.8                                      |  3 +
- pcs/test/test_constraints.py                   | 36 +++++++++++
- pcs/usage.py                                   |  3 +
- 10 files changed, 199 insertions(+), 3 deletions(-)
-
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index 94a1311..99bfe35 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -132,6 +132,7 @@ def load_module(env, middleware_factory, name):
-                 'set': constraint_ticket.create_with_set,
-                 'show': constraint_ticket.show,
-                 'add': constraint_ticket.create,
-+                'remove': constraint_ticket.remove,
-             }
-         )
- 
-diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py
-index ab70434..0ed4fdd 100644
---- a/pcs/cli/constraint_ticket/command.py
-+++ b/pcs/cli/constraint_ticket/command.py
-@@ -52,6 +52,12 @@ def add(lib, argv, modificators):
-         duplication_alowed=modificators["force"],
-     )
- 
-+def remove(lib, argv, modificators):
-+    if len(argv) != 2:
-+        raise CmdLineInputError()
-+    ticket, resource_id = argv
-+    lib.constraint_ticket.remove(ticket, resource_id)
-+
- def show(lib, argv, modificators):
-     """
-     show all ticket constraints
-diff --git a/pcs/cli/constraint_ticket/test/test_command.py b/pcs/cli/constraint_ticket/test/test_command.py
-index d40d421..9ca7817 100644
---- a/pcs/cli/constraint_ticket/test/test_command.py
-+++ b/pcs/cli/constraint_ticket/test/test_command.py
-@@ -65,3 +65,25 @@ class AddTest(TestCase):
-             resource_in_clone_alowed=True,
-             duplication_alowed=True,
-         )
-+
-+class RemoveTest(TestCase):
-+    def test_refuse_args_count(self):
-+        self.assertRaises(CmdLineInputError, lambda: command.remove(
-+            mock.MagicMock(),
-+            ["TICKET"],
-+            {},
-+        ))
-+        self.assertRaises(CmdLineInputError, lambda: command.remove(
-+            mock.MagicMock(),
-+            ["TICKET", "RESOURCE", "SOMETHING_ELSE"],
-+            {},
-+        ))
-+
-+    def test_call_library_remove_with_correct_attrs(self):
-+        lib = mock.MagicMock(
-+            constraint_ticket=mock.MagicMock(remove=mock.Mock())
-+        )
-+        command.remove(lib, ["TICKET", "RESOURCE"], {})
-+        lib.constraint_ticket.remove.assert_called_once_with(
-+            "TICKET", "RESOURCE",
-+        )
-diff --git a/pcs/constraint.py b/pcs/constraint.py
-index e32f1a3..d8415b6 100644
---- a/pcs/constraint.py
-+++ b/pcs/constraint.py
-@@ -90,6 +90,7 @@ def constraint_cmd(argv):
-             command_map = {
-                 "set": ticket_command.create_with_set,
-                 "add": ticket_command.add,
-+                "remove": ticket_command.remove,
-                 "show": ticket_command.show,
-             }
-             sub_command = argv[0] if argv else "show"
-diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py
-index 4154aac..c708794 100644
---- a/pcs/lib/cib/constraint/ticket.py
-+++ b/pcs/lib/cib/constraint/ticket.py
-@@ -39,7 +39,8 @@ def _validate_options_common(options):
- def _create_id(cib, ticket, resource_id, resource_role):
-     return tools.find_unique_id(
-         cib,
--        "-".join(('ticket', ticket, resource_id, resource_role))
-+        "-".join(('ticket', ticket, resource_id))
-+        +("-{0}".format(resource_role) if resource_role else "")
-     )
- 
- def prepare_options_with_set(cib, options, resource_set_list):
-@@ -93,7 +94,7 @@ def prepare_options_plain(cib, options, ticket, resource_id):
-             cib,
-             options["ticket"],
-             resource_id,
--            options["rsc-role"] if "rsc-role" in options else "no-role"
-+            options.get("rsc-role", "")
-         ),
-         partial(tools.check_new_id_applicable, cib, DESCRIPTION)
-     )
-@@ -103,6 +104,30 @@ def create_plain(constraint_section, options):
-     element.attrib.update(options)
-     return element
- 
-+def remove_plain(constraint_section, ticket_key, resource_id):
-+    ticket_element_list = constraint_section.xpath(
-+        './/rsc_ticket[@ticket="{0}" and @rsc="{1}"]'
-+        .format(ticket_key, resource_id)
-+    )
-+
-+    for ticket_element in ticket_element_list:
-+        ticket_element.getparent().remove(ticket_element)
-+
-+def remove_with_resource_set(constraint_section, ticket_key, resource_id):
-+    ref_element_list = constraint_section.xpath(
-+        './/rsc_ticket[@ticket="{0}"]/resource_set/resource_ref[@id="{1}"]'
-+        .format(ticket_key, resource_id)
-+    )
-+
-+    for ref_element in ref_element_list:
-+        set_element = ref_element.getparent()
-+        set_element.remove(ref_element)
-+        if not len(set_element):
-+            ticket_element = set_element.getparent()
-+            ticket_element.remove(set_element)
-+            if not len(ticket_element):
-+                ticket_element.getparent().remove(ticket_element)
-+
- def are_duplicate_plain(element, other_element):
-     return all(
-         element.attrib.get(name, "") == other_element.attrib.get(name, "")
-diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
-index ede748e..d3da004 100644
---- a/pcs/lib/cib/test/test_constraint_ticket.py
-+++ b/pcs/lib/cib/test/test_constraint_ticket.py
-@@ -8,10 +8,15 @@ from __future__ import (
- from functools import partial
- from pcs.test.tools.pcs_unittest import TestCase
- 
-+from lxml import etree
-+
- from pcs.common import report_codes
- from pcs.lib.cib.constraint import ticket
- from pcs.lib.errors import ReportItemSeverity as severities
--from pcs.test.tools.assertions import assert_raise_library_error
-+from pcs.test.tools.assertions import (
-+    assert_raise_library_error,
-+    assert_xml_equal,
-+)
- from pcs.test.tools.pcs_unittest import mock
- 
- 
-@@ -306,3 +311,85 @@ class AreDuplicateWithResourceSet(TestCase):
-             Element({"ticket": "ticket_key"}),
-             Element({"ticket": "X"}),
-         ))
-+
-+class RemovePlainTest(TestCase):
-+    def test_remove_tickets_constraints_for_resource(self):
-+        constraint_section = etree.fromstring("""
-+            <constraints>
-+                <rsc_ticket id="t1" ticket="tA" rsc="rA"/>
-+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
-+                <rsc_ticket id="t3" ticket="tA" rsc="rA"/>
-+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
-+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
-+            </constraints>
-+        """)
-+
-+        ticket.remove_plain(
-+            constraint_section,
-+            ticket_key="tA",
-+            resource_id="rA",
-+        )
-+
-+        assert_xml_equal(etree.tostring(constraint_section).decode(), """
-+            <constraints>
-+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
-+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
-+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
-+            </constraints>
-+        """)
-+
-+class RemoveWithSetTest(TestCase):
-+    def test_remove_resource_references_and_empty_remaining_parents(self):
-+        constraint_section = etree.fromstring("""
-+            <constraints>
-+                <rsc_ticket id="t1" ticket="tA">
-+                    <resource_set id="rs1">
-+                        <resource_ref id="rA"/>
-+                    </resource_set>
-+                    <resource_set id="rs2">
-+                        <resource_ref id="rA"/>
-+                    </resource_set>
-+                </rsc_ticket>
-+
-+                <rsc_ticket id="t2" ticket="tA">
-+                    <resource_set id="rs3">
-+                        <resource_ref id="rA"/>
-+                        <resource_ref id="rB"/>
-+                    </resource_set>
-+                    <resource_set id="rs4">
-+                        <resource_ref id="rA"/>
-+                    </resource_set>
-+                </rsc_ticket>
-+
-+                <rsc_ticket id="t3" ticket="tB">
-+                    <resource_set id="rs5">
-+                        <resource_ref id="rA"/>
-+                    </resource_set>
-+                </rsc_ticket>
-+            </constraints>
-+        """)
-+
-+        ticket.remove_with_resource_set(
-+            constraint_section,
-+            ticket_key="tA",
-+            resource_id="rA"
-+        )
-+
-+        assert_xml_equal(
-+            """
-+                <constraints>
-+                    <rsc_ticket id="t2" ticket="tA">
-+                        <resource_set id="rs3">
-+                            <resource_ref id="rB"/>
-+                        </resource_set>
-+                    </rsc_ticket>
-+
-+                    <rsc_ticket id="t3" ticket="tB">
-+                        <resource_set id="rs5">
-+                            <resource_ref id="rA"/>
-+                        </resource_set>
-+                    </rsc_ticket>
-+                </constraints>
-+            """,
-+            etree.tostring(constraint_section).decode()
-+        )
-diff --git a/pcs/lib/commands/constraint/ticket.py b/pcs/lib/commands/constraint/ticket.py
-index e6960d5..2ea7afc 100644
---- a/pcs/lib/commands/constraint/ticket.py
-+++ b/pcs/lib/commands/constraint/ticket.py
-@@ -68,3 +68,15 @@ def create(
-     )
- 
-     env.push_cib(cib)
-+
-+def remove(env, ticket_key, resource_id):
-+    """
-+    remove all ticket constraint from resource
-+    If resource is in resource set with another resources then only resource ref
-+    is removed. If resource is alone in resource set whole constraint is removed.
-+    """
-+    cib = env.get_cib()
-+    constraint_section = get_constraints(cib)
-+    ticket.remove_plain(constraint_section, ticket_key, resource_id)
-+    ticket.remove_with_resource_set(constraint_section, ticket_key, resource_id)
-+    env.push_cib(cib)
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 61abe67..40b146f 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -490,6 +490,9 @@ Create a ticket constraint for <resource id>. Available option is loss-policy=fe
- ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
- Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
- .TP
-+ticket remove <ticket> <resource id>
-+Remove all ticket constraints with <ticket> from <resource id>.
-+.TP
- remove [constraint id]...
- Remove constraint(s) or constraint rules with the specified id(s).
- .TP
-diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
-index 7c76e09..4007e90 100644
---- a/pcs/test/test_constraints.py
-+++ b/pcs/test/test_constraints.py
-@@ -2686,6 +2686,42 @@ class TicketAdd(ConstraintBaseTest):
-             "  Master A loss-policy=fence ticket=T",
-         ])
- 
-+class TicketRemoveTest(ConstraintBaseTest):
-+    def test_remove_multiple_tickets(self):
-+        #fixture
-+        self.assert_pcs_success('constraint ticket add T A')
-+        self.assert_pcs_success(
-+            'constraint ticket add T A --force',
-+            stdout_full=[
-+                "Warning: duplicate constraint already exists",
-+                "  A ticket=T (id:ticket-T-A)"
-+            ]
-+        )
-+        self.assert_pcs_success(
-+            'constraint ticket set A B setoptions ticket=T'
-+        )
-+        self.assert_pcs_success(
-+            'constraint ticket set A setoptions ticket=T'
-+        )
-+        self.assert_pcs_success("constraint ticket show", stdout_full=[
-+            "Ticket Constraints:",
-+            "  A ticket=T",
-+            "  A ticket=T",
-+            "  Resource Sets:",
-+            "    set A B setoptions ticket=T",
-+            "    set A setoptions ticket=T",
-+        ])
-+
-+        #test
-+        self.assert_pcs_success("constraint ticket remove T A")
-+
-+        self.assert_pcs_success("constraint ticket show", stdout_full=[
-+            "Ticket Constraints:",
-+            "  Resource Sets:",
-+            "    set B setoptions ticket=T",
-+        ])
-+
-+
- class TicketShow(ConstraintBaseTest):
-     def test_show_set(self):
-         self.assert_pcs_success('constraint ticket set A B setoptions ticket=T')
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 9d4617f..764e3fc 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1011,6 +1011,9 @@ Commands:
-         Required constraint option is ticket=<ticket>. Optional constraint
-         options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
- 
-+    ticket remove <ticket> <resource id>
-+        Remove all ticket constraints with <ticket> from <resource id>.
-+
-     remove [constraint id]...
-         Remove constraint(s) or constraint rules with the specified id(s).
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1305049-02-pcs-does-not-support-ticket-constraints.patch b/SOURCES/bz1305049-02-pcs-does-not-support-ticket-constraints.patch
deleted file mode 100644
index 6655172..0000000
--- a/SOURCES/bz1305049-02-pcs-does-not-support-ticket-constraints.patch
+++ /dev/null
@@ -1,287 +0,0 @@
-From deac91b1fc74065d01342420accfd1af88237237 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Tue, 20 Sep 2016 08:20:29 +0200
-Subject: [PATCH] squash bz1305049 pcs does not support "ticket" con
-
-07ae6704fff5 fail when no matching ticket constraint for remove
-
-d25ed3d9bc65 fix help for constraint ticket commands
-
-66b91ba0da7e fix manpage for booth ticket add command
-
-2710bc2e15c2 fix manpage for constraint ticket set
----
- pcs/cli/constraint_ticket/command.py       |  4 ++-
- pcs/lib/cib/constraint/ticket.py           |  4 +++
- pcs/lib/cib/test/test_constraint_ticket.py | 53 +++++++++++++++++++++++++++---
- pcs/lib/commands/constraint/ticket.py      | 15 +++++++--
- pcs/lib/commands/test/test_ticket.py       | 20 +++++++++++
- pcs/pcs.8                                  |  6 ++--
- pcs/test/test_constraints.py               |  8 +++++
- pcs/usage.py                               | 10 +++---
- 8 files changed, 105 insertions(+), 15 deletions(-)
-
-diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py
-index 0ed4fdd..583ba9e 100644
---- a/pcs/cli/constraint_ticket/command.py
-+++ b/pcs/cli/constraint_ticket/command.py
-@@ -8,6 +8,7 @@ from __future__ import (
- from pcs.cli.common.errors import CmdLineInputError
- from pcs.cli.constraint import command
- from pcs.cli.constraint_ticket import parse_args, console_report
-+from pcs.cli.common.console_report import error
- 
- def create_with_set(lib, argv, modificators):
-     """
-@@ -56,7 +57,8 @@ def remove(lib, argv, modificators):
-     if len(argv) != 2:
-         raise CmdLineInputError()
-     ticket, resource_id = argv
--    lib.constraint_ticket.remove(ticket, resource_id)
-+    if not lib.constraint_ticket.remove(ticket, resource_id):
-+        raise error("no matching ticket constraint found")
- 
- def show(lib, argv, modificators):
-     """
-diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py
-index c708794..85d045c 100644
---- a/pcs/lib/cib/constraint/ticket.py
-+++ b/pcs/lib/cib/constraint/ticket.py
-@@ -113,6 +113,8 @@ def remove_plain(constraint_section, ticket_key, resource_id):
-     for ticket_element in ticket_element_list:
-         ticket_element.getparent().remove(ticket_element)
- 
-+    return len(ticket_element_list) > 0
-+
- def remove_with_resource_set(constraint_section, ticket_key, resource_id):
-     ref_element_list = constraint_section.xpath(
-         './/rsc_ticket[@ticket="{0}"]/resource_set/resource_ref[@id="{1}"]'
-@@ -128,6 +130,8 @@ def remove_with_resource_set(constraint_section, ticket_key, resource_id):
-             if not len(ticket_element):
-                 ticket_element.getparent().remove(ticket_element)
- 
-+    return len(ref_element_list) > 0
-+
- def are_duplicate_plain(element, other_element):
-     return all(
-         element.attrib.get(name, "") == other_element.attrib.get(name, "")
-diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
-index d3da004..b720b55 100644
---- a/pcs/lib/cib/test/test_constraint_ticket.py
-+++ b/pcs/lib/cib/test/test_constraint_ticket.py
-@@ -324,11 +324,34 @@ class RemovePlainTest(TestCase):
-             </constraints>
-         """)
- 
--        ticket.remove_plain(
-+        self.assertTrue(ticket.remove_plain(
-             constraint_section,
-             ticket_key="tA",
-             resource_id="rA",
--        )
-+        ))
-+
-+        assert_xml_equal(etree.tostring(constraint_section).decode(), """
-+            <constraints>
-+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
-+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
-+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
-+            </constraints>
-+        """)
-+
-+    def test_remove_nothing_when_no_matching_found(self):
-+        constraint_section = etree.fromstring("""
-+            <constraints>
-+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
-+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
-+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
-+            </constraints>
-+        """)
-+
-+        self.assertFalse(ticket.remove_plain(
-+            constraint_section,
-+            ticket_key="tA",
-+            resource_id="rA",
-+        ))
- 
-         assert_xml_equal(etree.tostring(constraint_section).decode(), """
-             <constraints>
-@@ -369,11 +392,11 @@ class RemoveWithSetTest(TestCase):
-             </constraints>
-         """)
- 
--        ticket.remove_with_resource_set(
-+        self.assertTrue(ticket.remove_with_resource_set(
-             constraint_section,
-             ticket_key="tA",
-             resource_id="rA"
--        )
-+        ))
- 
-         assert_xml_equal(
-             """
-@@ -393,3 +416,25 @@ class RemoveWithSetTest(TestCase):
-             """,
-             etree.tostring(constraint_section).decode()
-         )
-+
-+    def test_remove_nothing_when_no_matching_found(self):
-+        constraint_section = etree.fromstring("""
-+                <constraints>
-+                    <rsc_ticket id="t2" ticket="tA">
-+                        <resource_set id="rs3">
-+                            <resource_ref id="rB"/>
-+                        </resource_set>
-+                    </rsc_ticket>
-+
-+                    <rsc_ticket id="t3" ticket="tB">
-+                        <resource_set id="rs5">
-+                            <resource_ref id="rA"/>
-+                        </resource_set>
-+                    </rsc_ticket>
-+                </constraints>
-+        """)
-+        self.assertFalse(ticket.remove_with_resource_set(
-+            constraint_section,
-+            ticket_key="tA",
-+            resource_id="rA"
-+        ))
-diff --git a/pcs/lib/commands/constraint/ticket.py b/pcs/lib/commands/constraint/ticket.py
-index 2ea7afc..a14c5ad 100644
---- a/pcs/lib/commands/constraint/ticket.py
-+++ b/pcs/lib/commands/constraint/ticket.py
-@@ -77,6 +77,17 @@ def remove(env, ticket_key, resource_id):
-     """
-     cib = env.get_cib()
-     constraint_section = get_constraints(cib)
--    ticket.remove_plain(constraint_section, ticket_key, resource_id)
--    ticket.remove_with_resource_set(constraint_section, ticket_key, resource_id)
-+    any_plain_removed = ticket.remove_plain(
-+        constraint_section,
-+        ticket_key,
-+        resource_id
-+    )
-+    any_with_resource_set_removed = ticket.remove_with_resource_set(
-+        constraint_section,
-+        ticket_key,
-+        resource_id
-+    )
-+
-     env.push_cib(cib)
-+
-+    return any_plain_removed or any_with_resource_set_removed
-diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
-index 586ca4b..edf592a 100644
---- a/pcs/lib/commands/test/test_ticket.py
-+++ b/pcs/lib/commands/test/test_ticket.py
-@@ -6,6 +6,8 @@ from __future__ import (
- )
- 
- from pcs.test.tools.pcs_unittest import TestCase
-+from pcs.test.tools.pcs_unittest import mock
-+from pcs.test.tools.misc import create_patcher
- 
- from pcs.common import report_codes
- from pcs.lib.commands.constraint import ticket as ticket_command
-@@ -18,6 +20,7 @@ from pcs.test.tools.assertions import (
- from pcs.test.tools.misc import get_test_resource as rc
- from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
- 
-+patch_commands = create_patcher("pcs.lib.commands.constraint.ticket")
- 
- class CreateTest(TestCase):
-     def setUp(self):
-@@ -65,3 +68,20 @@ class CreateTest(TestCase):
-                 {"resource_id": "resourceA"},
-             ),
-         )
-+
-+@patch_commands("get_constraints", mock.Mock)
-+class RemoveTest(TestCase):
-+    @patch_commands("ticket.remove_plain", mock.Mock(return_value=1))
-+    @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=0))
-+    def test_successfully_remove_plain(self):
-+        self.assertTrue(ticket_command.remove(mock.MagicMock(), "T", "R"))
-+
-+    @patch_commands("ticket.remove_plain", mock.Mock(return_value=0))
-+    @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=1))
-+    def test_successfully_remove_with_resource_set(self):
-+        self.assertTrue(ticket_command.remove(mock.MagicMock(), "T", "R"))
-+
-+    @patch_commands("ticket.remove_plain", mock.Mock(return_value=0))
-+    @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=0))
-+    def test_raises_library_error_when_no_matching_constraint_found(self):
-+        self.assertFalse(ticket_command.remove(mock.MagicMock(), "T", "R"))
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 40b146f..1efe8f4 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -484,10 +484,10 @@ Remove colocation constraints with specified resources.
- ticket [show] [\fB\-\-full\fR]
- List all current ticket constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
- .TP
--ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id]
-+ticket add <ticket> [<role>] <resource id> [<options>] [id=<constraint\-id>]
- Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
- .TP
--ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
-+ticket set <resource1> [<resourceN>]... [<options>] [set <resourceX> ... [<options>]] setoptions <constraint_options>
- Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
- .TP
- ticket remove <ticket> <resource id>
-@@ -587,7 +587,7 @@ Write new booth configuration with specified sites and arbitrators.  Total numbe
- destroy
- Remove booth configuration files.
- .TP
--ticket add <ticket>
-+ticket add <ticket> [<name>=<value> ...]
- Add new ticket to the current configuration. Ticket options are specified in booth manpage.
- 
- .TP
-diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
-index 4007e90..fee7093 100644
---- a/pcs/test/test_constraints.py
-+++ b/pcs/test/test_constraints.py
-@@ -2721,6 +2721,14 @@ class TicketRemoveTest(ConstraintBaseTest):
-             "    set B setoptions ticket=T",
-         ])
- 
-+    def test_fail_when_no_matching_ticket_constraint_here(self):
-+        self.assert_pcs_success("constraint ticket show", stdout_full=[
-+            "Ticket Constraints:",
-+        ])
-+        self.assert_pcs_fail("constraint ticket remove T A", [
-+            "Error: no matching ticket constraint found"
-+        ])
-+
- 
- class TicketShow(ConstraintBaseTest):
-     def test_show_set(self):
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 764e3fc..ea407c3 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -996,15 +996,15 @@ Commands:
-         List all current ticket constraints (if --full is specified show
-         the internal constraint id's as well).
- 
--    ticket add <ticket> [<role>] <resource id> [options]
--               [id=constraint-id]
-+    ticket add <ticket> [<role>] <resource id> [<options>]
-+               [id=<constraint-id>]
-         Create a ticket constraint for <resource id>.
-         Available option is loss-policy=fence/stop/freeze/demote.
-         A role can be master, slave, started or stopped.
- 
--    ticket set <resource1> [resourceN]... [options]
--               [set <resourceX> ... [options]]
--               [setoptions [constraint_options]]
-+    ticket set <resource1> [<resourceN>]... [<options>]
-+               [set <resourceX> ... [<options>]]
-+               setoptions <constraint_options>
-         Create a ticket constraint with a resource set.
-         Available options are sequential=true/false, require-all=true/false,
-         action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1308514-01-add-booth-support.patch b/SOURCES/bz1308514-01-add-booth-support.patch
deleted file mode 100644
index c096bbe..0000000
--- a/SOURCES/bz1308514-01-add-booth-support.patch
+++ /dev/null
@@ -1,8684 +0,0 @@
-From 2c8d74653e3217ba1458d65854e3a448fcedfc5d Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Tue, 28 Jun 2016 15:36:30 +0200
-Subject: [PATCH] add booth support
-
----
- pcs/alert.py                                |   22 +-
- pcs/app.py                                  |    7 +
- pcs/booth.py                                |   76 ++
- pcs/cli/booth/__init__.py                   |    0
- pcs/cli/booth/command.py                    |  177 ++++
- pcs/cli/booth/env.py                        |  121 +++
- pcs/cli/booth/test/__init__.py              |    0
- pcs/cli/booth/test/test_command.py          |   44 +
- pcs/cli/booth/test/test_env.py              |  118 +++
- pcs/cli/common/console_report.py            |   13 +-
- pcs/cli/common/env.py                       |    2 +
- pcs/cli/common/lib_wrapper.py               |   78 +-
- pcs/cli/common/middleware.py                |    9 +-
- pcs/cli/common/parse_args.py                |   27 +
- pcs/cli/common/test/test_lib_wrapper.py     |   28 +-
- pcs/cli/common/test/test_middleware.py      |    6 +-
- pcs/cli/common/test/test_parse_args.py      |   84 +-
- pcs/cluster.py                              |   10 +
- pcs/common/env_file_role_codes.py           |    9 +
- pcs/common/report_codes.py                  |   40 +
- pcs/common/test/__init__.py                 |    0
- pcs/common/tools.py                         |    5 +
- pcs/lib/booth/__init__.py                   |    0
- pcs/lib/booth/config_exchange.py            |   43 +
- pcs/lib/booth/config_files.py               |   97 +++
- pcs/lib/booth/config_parser.py              |   90 ++
- pcs/lib/booth/config_structure.py           |  111 +++
- pcs/lib/booth/env.py                        |  149 ++++
- pcs/lib/booth/reports.py                    |  409 +++++++++
- pcs/lib/booth/resource.py                   |  116 +++
- pcs/lib/booth/status.py                     |   41 +
- pcs/lib/booth/sync.py                       |  208 +++++
- pcs/lib/booth/test/__init__.py              |    0
- pcs/lib/booth/test/test_config_exchange.py  |   70 ++
- pcs/lib/booth/test/test_config_files.py     |  272 ++++++
- pcs/lib/booth/test/test_config_parser.py    |  169 ++++
- pcs/lib/booth/test/test_config_structure.py |  224 +++++
- pcs/lib/booth/test/test_env.py              |  228 +++++
- pcs/lib/booth/test/test_resource.py         |  203 +++++
- pcs/lib/booth/test/test_status.py           |  137 +++
- pcs/lib/booth/test/test_sync.py             | 1215 +++++++++++++++++++++++++++
- pcs/lib/cib/tools.py                        |    7 +
- pcs/lib/commands/booth.py                   |  349 ++++++++
- pcs/lib/commands/test/test_booth.py         |  614 ++++++++++++++
- pcs/lib/commands/test/test_ticket.py        |   15 +-
- pcs/lib/corosync/live.py                    |    3 +
- pcs/lib/env.py                              |   44 +-
- pcs/lib/env_file.py                         |  122 +++
- pcs/lib/errors.py                           |   14 +
- pcs/lib/external.py                         |   66 +-
- pcs/lib/reports.py                          |  215 ++++-
- pcs/lib/test/misc.py                        |   20 +
- pcs/lib/test/test_env_file.py               |  187 +++++
- pcs/lib/test/test_errors.py                 |   20 +
- pcs/pcs.8                                   |   52 ++
- pcs/resource.py                             |   19 +-
- pcs/settings_default.py                     |    2 +
- pcs/stonith.py                              |    3 +-
- pcs/test/resources/.gitignore               |    1 +
- pcs/test/resources/tmp_keyfile              |    1 +
- pcs/test/suite.py                           |   16 +-
- pcs/test/test_alert.py                      |    8 +-
- pcs/test/test_booth.py                      |  342 ++++++++
- pcs/test/test_lib_cib_tools.py              |   21 +
- pcs/test/test_lib_external.py               |   86 ++
- pcs/test/tools/color_text_runner.py         |    9 +-
- pcs/test/tools/pcs_unittest.py              |    7 +
- pcs/usage.py                                |   72 ++
- pcs/utils.py                                |   68 +-
- pcsd/pcs.rb                                 |   76 +-
- pcsd/remote.rb                              |  144 ++++
- pcsd/settings.rb                            |    1 +
- 72 files changed, 7093 insertions(+), 169 deletions(-)
- create mode 100644 pcs/booth.py
- create mode 100644 pcs/cli/booth/__init__.py
- create mode 100644 pcs/cli/booth/command.py
- create mode 100644 pcs/cli/booth/env.py
- create mode 100644 pcs/cli/booth/test/__init__.py
- create mode 100644 pcs/cli/booth/test/test_command.py
- create mode 100644 pcs/cli/booth/test/test_env.py
- create mode 100644 pcs/common/env_file_role_codes.py
- create mode 100644 pcs/common/test/__init__.py
- create mode 100644 pcs/lib/booth/__init__.py
- create mode 100644 pcs/lib/booth/config_exchange.py
- create mode 100644 pcs/lib/booth/config_files.py
- create mode 100644 pcs/lib/booth/config_parser.py
- create mode 100644 pcs/lib/booth/config_structure.py
- create mode 100644 pcs/lib/booth/env.py
- create mode 100644 pcs/lib/booth/reports.py
- create mode 100644 pcs/lib/booth/resource.py
- create mode 100644 pcs/lib/booth/status.py
- create mode 100644 pcs/lib/booth/sync.py
- create mode 100644 pcs/lib/booth/test/__init__.py
- create mode 100644 pcs/lib/booth/test/test_config_exchange.py
- create mode 100644 pcs/lib/booth/test/test_config_files.py
- create mode 100644 pcs/lib/booth/test/test_config_parser.py
- create mode 100644 pcs/lib/booth/test/test_config_structure.py
- create mode 100644 pcs/lib/booth/test/test_env.py
- create mode 100644 pcs/lib/booth/test/test_resource.py
- create mode 100644 pcs/lib/booth/test/test_status.py
- create mode 100644 pcs/lib/booth/test/test_sync.py
- create mode 100644 pcs/lib/commands/booth.py
- create mode 100644 pcs/lib/commands/test/test_booth.py
- create mode 100644 pcs/lib/env_file.py
- create mode 100644 pcs/lib/test/misc.py
- create mode 100644 pcs/lib/test/test_env_file.py
- create mode 100644 pcs/lib/test/test_errors.py
- create mode 100644 pcs/test/resources/tmp_keyfile
- create mode 100644 pcs/test/test_booth.py
- create mode 100644 pcs/test/tools/pcs_unittest.py
-
-diff --git a/pcs/alert.py b/pcs/alert.py
-index 4786f57..693bb8d 100644
---- a/pcs/alert.py
-+++ b/pcs/alert.py
-@@ -6,16 +6,18 @@ from __future__ import (
- )
- 
- import sys
-+from functools import partial
- 
- from pcs import (
-     usage,
-     utils,
- )
- from pcs.cli.common.errors import CmdLineInputError
--from pcs.cli.common.parse_args import prepare_options
-+from pcs.cli.common.parse_args import prepare_options, group_by_keywords
- from pcs.cli.common.console_report import indent
- from pcs.lib.errors import LibraryError
- 
-+parse_cmd_sections = partial(group_by_keywords, implicit_first_keyword="main")
- 
- def alert_cmd(*args):
-     argv = args[1]
-@@ -67,16 +69,6 @@ def recipient_cmd(*args):
-         )
- 
- 
--def parse_cmd_sections(arg_list, section_list):
--    output = dict([(section, []) for section in section_list + ["main"]])
--    cur_section = "main"
--    for arg in arg_list:
--        if arg in section_list:
--            cur_section = arg
--            continue
--        output[cur_section].append(arg)
--
--    return output
- 
- 
- def ensure_only_allowed_options(parameter_dict, allowed_list):
-@@ -91,7 +83,7 @@ def alert_add(lib, argv, modifiers):
-     if not argv:
-         raise CmdLineInputError()
- 
--    sections = parse_cmd_sections(argv, ["options", "meta"])
-+    sections = parse_cmd_sections(argv, set(["options", "meta"]))
-     main_args = prepare_options(sections["main"])
-     ensure_only_allowed_options(main_args, ["id", "description", "path"])
- 
-@@ -110,7 +102,7 @@ def alert_update(lib, argv, modifiers):
- 
-     alert_id = argv[0]
- 
--    sections = parse_cmd_sections(argv[1:], ["options", "meta"])
-+    sections = parse_cmd_sections(argv[1:], set(["options", "meta"]))
-     main_args = prepare_options(sections["main"])
-     ensure_only_allowed_options(main_args, ["description", "path"])
- 
-@@ -137,7 +129,7 @@ def recipient_add(lib, argv, modifiers):
-     alert_id = argv[0]
-     recipient_value = argv[1]
- 
--    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
-+    sections = parse_cmd_sections(argv[2:], set(["options", "meta"]))
-     main_args = prepare_options(sections["main"])
-     ensure_only_allowed_options(main_args, ["description", "id"])
- 
-@@ -158,7 +150,7 @@ def recipient_update(lib, argv, modifiers):
- 
-     recipient_id = argv[0]
- 
--    sections = parse_cmd_sections(argv[1:], ["options", "meta"])
-+    sections = parse_cmd_sections(argv[1:], set(["options", "meta"]))
-     main_args = prepare_options(sections["main"])
-     ensure_only_allowed_options(main_args, ["description", "value"])
- 
-diff --git a/pcs/app.py b/pcs/app.py
-index 3758ee4..ab9e970 100644
---- a/pcs/app.py
-+++ b/pcs/app.py
-@@ -13,6 +13,7 @@ logging.basicConfig()
- 
- from pcs import (
-     acl,
-+    booth,
-     cluster,
-     config,
-     constraint,
-@@ -97,6 +98,7 @@ def main(argv=None):
-             "token=", "token_coefficient=", "consensus=", "join=",
-             "miss_count_const=", "fail_recv_const=",
-             "corosync_conf=", "cluster_conf=",
-+            "booth-conf=", "booth-key=",
-             "remote", "watchdog=",
-             #in pcs status - do not display resorce status on inactive node
-             "hide-inactive",
-@@ -199,6 +201,11 @@ def main(argv=None):
-             args,
-             utils.get_modificators()
-         ),
-+        "booth": lambda argv: booth.booth_cmd(
-+            utils.get_library_wrapper(),
-+            argv,
-+            utils.get_modificators()
-+        ),
-     }
-     if command not in cmd_map:
-         usage.main()
-diff --git a/pcs/booth.py b/pcs/booth.py
-new file mode 100644
-index 0000000..764dcd8
---- /dev/null
-+++ b/pcs/booth.py
-@@ -0,0 +1,76 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import sys
-+
-+from pcs import usage
-+from pcs import utils
-+from pcs.cli.booth import command
-+from pcs.cli.common.errors import CmdLineInputError
-+from pcs.lib.errors import LibraryError
-+from pcs.resource import resource_create, resource_remove
-+
-+
-+def booth_cmd(lib, argv, modifiers):
-+    """
-+    routes booth command
-+    """
-+    if len(argv) < 1:
-+        usage.booth()
-+        sys.exit(1)
-+
-+    sub_cmd, argv_next = argv[0], argv[1:]
-+    try:
-+        if sub_cmd == "help":
-+            usage.booth(argv)
-+        elif sub_cmd == "config":
-+            command.config_show(lib, argv_next, modifiers)
-+        elif sub_cmd == "setup":
-+            command.config_setup(lib, argv_next, modifiers)
-+        elif sub_cmd == "destroy":
-+            command.config_destroy(lib, argv_next, modifiers)
-+        elif sub_cmd == "ticket":
-+            if len(argv_next) < 1:
-+                raise CmdLineInputError()
-+            if argv_next[0] == "add":
-+                command.config_ticket_add(lib, argv_next[1:], modifiers)
-+            elif argv_next[0] == "remove":
-+                command.config_ticket_remove(lib, argv_next[1:], modifiers)
-+            elif argv_next[0] == "grant":
-+                command.ticket_grant(lib, argv_next[1:], modifiers)
-+            elif argv_next[0] == "revoke":
-+                command.ticket_revoke(lib, argv_next[1:], modifiers)
-+            else:
-+                raise CmdLineInputError()
-+        elif sub_cmd == "create":
-+            command.get_create_in_cluster(resource_create)(
-+                lib, argv_next, modifiers
-+            )
-+        elif sub_cmd == "remove":
-+            command.get_remove_from_cluster(resource_remove)(
-+                lib, argv_next, modifiers
-+            )
-+        elif sub_cmd == "sync":
-+            command.sync(lib, argv_next, modifiers)
-+        elif sub_cmd == "pull":
-+            command.pull(lib, argv_next, modifiers)
-+        elif sub_cmd == "enable":
-+            command.enable(lib, argv_next, modifiers)
-+        elif sub_cmd == "disable":
-+            command.disable(lib, argv_next, modifiers)
-+        elif sub_cmd == "start":
-+            command.start(lib, argv_next, modifiers)
-+        elif sub_cmd == "stop":
-+            command.stop(lib, argv_next, modifiers)
-+        elif sub_cmd == "status":
-+            command.status(lib, argv_next, modifiers)
-+        else:
-+            raise CmdLineInputError()
-+    except LibraryError as e:
-+        utils.process_library_reports(e.args)
-+    except CmdLineInputError as e:
-+        utils.exit_on_cmdline_input_errror(e, "booth", sub_cmd)
-diff --git a/pcs/cli/booth/__init__.py b/pcs/cli/booth/__init__.py
-new file mode 100644
-index 0000000..e69de29
-diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py
-new file mode 100644
-index 0000000..bea6582
---- /dev/null
-+++ b/pcs/cli/booth/command.py
-@@ -0,0 +1,177 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from pcs.cli.common.errors import CmdLineInputError
-+from pcs.cli.common.parse_args import group_by_keywords
-+
-+
-+DEFAULT_BOOTH_NAME = "booth"
-+
-+def __get_name(modifiers):
-+    return  modifiers["name"] if modifiers["name"] else DEFAULT_BOOTH_NAME
-+
-+def config_setup(lib, arg_list, modifiers):
-+    """
-+    create booth config
-+    """
-+    booth_configuration = group_by_keywords(
-+        arg_list,
-+        set(["sites", "arbitrators"]),
-+        keyword_repeat_allowed=False
-+    )
-+    if "sites" not in booth_configuration or not booth_configuration["sites"]:
-+        raise CmdLineInputError()
-+
-+    lib.booth.config_setup(booth_configuration, modifiers["force"])
-+
-+def config_destroy(lib, arg_list, modifiers):
-+    """
-+    destroy booth config
-+    """
-+    if arg_list:
-+        raise CmdLineInputError()
-+    lib.booth.config_destroy(ignore_config_load_problems=modifiers["force"])
-+
-+
-+def config_show(lib, arg_list, modifiers):
-+    """
-+    print booth config
-+    """
-+    booth_configuration = lib.booth.config_show()
-+    authfile_lines = []
-+    if booth_configuration["authfile"]:
-+        authfile_lines.append(
-+            "authfile = {0}".format(booth_configuration["authfile"])
-+        )
-+
-+    line_list = (
-+        ["site = {0}".format(site) for site in booth_configuration["sites"]]
-+        +
-+        [
-+            "arbitrator = {0}".format(arbitrator)
-+            for arbitrator in booth_configuration["arbitrators"]
-+        ]
-+        + authfile_lines +
-+        [
-+            'ticket = "{0}"'.format(ticket)
-+            for ticket in booth_configuration["tickets"]
-+        ]
-+    )
-+    for line in line_list:
-+        print(line)
-+
-+def config_ticket_add(lib, arg_list, modifiers):
-+    """
-+    add ticket to current configuration
-+    """
-+    if len(arg_list) != 1:
-+        raise CmdLineInputError
-+    lib.booth.config_ticket_add(arg_list[0])
-+
-+def config_ticket_remove(lib, arg_list, modifiers):
-+    """
-+    add ticket to current configuration
-+    """
-+    if len(arg_list) != 1:
-+        raise CmdLineInputError
-+    lib.booth.config_ticket_remove(arg_list[0])
-+
-+def ticket_operation(lib_call, arg_list, modifiers):
-+    site_ip = None
-+    if len(arg_list) == 2:
-+        site_ip = arg_list[1]
-+    elif len(arg_list) != 1:
-+        raise CmdLineInputError()
-+
-+    ticket = arg_list[0]
-+    lib_call(__get_name(modifiers), ticket, site_ip)
-+
-+def ticket_revoke(lib, arg_list, modifiers):
-+    ticket_operation(lib.booth.ticket_revoke, arg_list, modifiers)
-+
-+def ticket_grant(lib, arg_list, modifiers):
-+    ticket_operation(lib.booth.ticket_grant, arg_list, modifiers)
-+
-+def get_create_in_cluster(resource_create):
-+    #TODO resource_remove is provisional hack until resources are not moved to
-+    #lib
-+    def create_in_cluster(lib, arg_list, modifiers):
-+        if len(arg_list) != 2 or arg_list[0] != "ip":
-+            raise CmdLineInputError()
-+        ip = arg_list[1]
-+
-+        lib.booth.create_in_cluster(
-+            __get_name(modifiers),
-+            ip,
-+            resource_create,
-+        )
-+    return create_in_cluster
-+
-+def get_remove_from_cluster(resource_remove):
-+    #TODO resource_remove is provisional hack until resources are not moved to
-+    #lib
-+    def remove_from_cluster(lib, arg_list, modifiers):
-+        if arg_list:
-+            raise CmdLineInputError()
-+
-+        lib.booth.remove_from_cluster(__get_name(modifiers), resource_remove)
-+
-+    return remove_from_cluster
-+
-+
-+def sync(lib, arg_list, modifiers):
-+    if arg_list:
-+        raise CmdLineInputError()
-+    lib.booth.config_sync(
-+        DEFAULT_BOOTH_NAME,
-+        skip_offline_nodes=modifiers["skip_offline_nodes"]
-+    )
-+
-+
-+def enable(lib, arg_list, modifiers):
-+    if arg_list:
-+        raise CmdLineInputError()
-+    lib.booth.enable(DEFAULT_BOOTH_NAME)
-+
-+
-+def disable(lib, arg_list, modifiers):
-+    if arg_list:
-+        raise CmdLineInputError()
-+    lib.booth.disable(DEFAULT_BOOTH_NAME)
-+
-+
-+def start(lib, arg_list, modifiers):
-+    if arg_list:
-+        raise CmdLineInputError()
-+    lib.booth.start(DEFAULT_BOOTH_NAME)
-+
-+
-+def stop(lib, arg_list, modifiers):
-+    if arg_list:
-+        raise CmdLineInputError()
-+    lib.booth.stop(DEFAULT_BOOTH_NAME)
-+
-+
-+def pull(lib, arg_list, modifiers):
-+    if len(arg_list) != 1:
-+        raise CmdLineInputError()
-+    lib.booth.pull(arg_list[0], DEFAULT_BOOTH_NAME)
-+
-+
-+def status(lib, arg_list, modifiers):
-+    if arg_list:
-+        raise CmdLineInputError()
-+    booth_status = lib.booth.status(DEFAULT_BOOTH_NAME)
-+    if booth_status.get("ticket"):
-+        print("TICKETS:")
-+        print(booth_status["ticket"])
-+    if booth_status.get("peers"):
-+        print("PEERS:")
-+        print(booth_status["peers"])
-+    if booth_status.get("status"):
-+        print("DAEMON STATUS:")
-+        print(booth_status["status"])
-diff --git a/pcs/cli/booth/env.py b/pcs/cli/booth/env.py
-new file mode 100644
-index 0000000..918e487
---- /dev/null
-+++ b/pcs/cli/booth/env.py
-@@ -0,0 +1,121 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os.path
-+
-+from pcs.cli.common import console_report
-+from pcs.common import report_codes, env_file_role_codes as file_role_codes
-+from pcs.lib.errors import LibraryEnvError
-+
-+
-+def read_env_file(path):
-+    try:
-+        return {
-+            "content": open(path).read() if os.path.isfile(path) else None
-+        }
-+    except EnvironmentError as e:
-+        raise console_report.error(
-+            "Unable to read {0}: {1}".format(path, e.strerror)
-+        )
-+
-+def write_env_file(env_file, file_path):
-+    try:
-+        f = open(file_path, "wb" if env_file.get("is_binary", False) else "w")
-+        f.write(env_file["content"])
-+        f.close()
-+    except EnvironmentError as e:
-+        raise console_report.error(
-+            "Unable to write {0}: {1}".format(file_path, e.strerror)
-+        )
-+
-+def process_no_existing_file_expectation(file_role, env_file, file_path):
-+    if(
-+        env_file["no_existing_file_expected"]
-+        and
-+        os.path.exists(file_path)
-+    ):
-+        msg = "{0} {1} already exists".format(file_role, file_path)
-+        if not env_file["can_overwrite_existing_file"]:
-+            raise console_report.error(
-+                "{0}, use --force to override".format(msg)
-+            )
-+        console_report.warn(msg)
-+
-+def is_missing_file_report(report, file_role_code):
-+    return (
-+        report.code == report_codes.FILE_DOES_NOT_EXIST
-+        and
-+        report.info["file_role"] == file_role_code
-+    )
-+
-+def report_missing_file(file_role, file_path):
-+    console_report.error(
-+        "{0} '{1}' does not exist".format(file_role, file_path)
-+    )
-+
-+def middleware_config(name, config_path, key_path):
-+    if config_path and not key_path:
-+        raise console_report.error(
-+            "With --booth-conf must be specified --booth-key as well"
-+        )
-+
-+    if key_path and not config_path:
-+        raise console_report.error(
-+            "With --booth-key must be specified --booth-conf as well"
-+        )
-+
-+    is_mocked_environment = config_path and key_path
-+
-+    def create_booth_env():
-+        if not is_mocked_environment:
-+            return {"name": name}
-+        return {
-+            "name": name,
-+            "config_file": read_env_file(config_path),
-+            "key_file": read_env_file(key_path),
-+            "key_path": key_path,
-+        }
-+
-+    def flush(modified_env):
-+        if not is_mocked_environment:
-+            return
-+        if not modified_env:
-+            #TODO now this would not happen
-+            #for more information see comment in
-+            #pcs.cli.common.lib_wrapper.lib_env_to_cli_env
-+            raise console_report.error("Error during library communication")
-+
-+        process_no_existing_file_expectation(
-+            "booth config file",
-+            modified_env["config_file"],
-+            config_path
-+        )
-+        process_no_existing_file_expectation(
-+            "booth key file",
-+            modified_env["key_file"],
-+            key_path
-+        )
-+        write_env_file(modified_env["key_file"], key_path)
-+        write_env_file(modified_env["config_file"], config_path)
-+
-+    def apply(next_in_line, env, *args, **kwargs):
-+        env.booth = create_booth_env()
-+        try:
-+            result_of_next = next_in_line(env, *args, **kwargs)
-+        except LibraryEnvError as e:
-+            for report in e.args:
-+                if is_missing_file_report(report, file_role_codes.BOOTH_CONFIG):
-+                    report_missing_file("Booth config file", config_path)
-+                    e.sign_processed(report)
-+                if is_missing_file_report(report, file_role_codes.BOOTH_KEY):
-+                    report_missing_file("Booth key file", key_path)
-+                    e.sign_processed(report)
-+            raise e
-+        flush(env.booth["modified_env"])
-+        return result_of_next
-+
-+    return apply
-diff --git a/pcs/cli/booth/test/__init__.py b/pcs/cli/booth/test/__init__.py
-new file mode 100644
-index 0000000..e69de29
-diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py
-new file mode 100644
-index 0000000..00216f2
---- /dev/null
-+++ b/pcs/cli/booth/test/test_command.py
-@@ -0,0 +1,44 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from pcs.cli.booth import command
-+from pcs.test.tools.pcs_mock import mock
-+
-+
-+class ConfigSetupTest(TestCase):
-+    def test_call_lib_with_correct_args(self):
-+        lib = mock.MagicMock()
-+        lib.booth = mock.MagicMock()
-+        lib.booth.config_setup = mock.MagicMock()
-+
-+        command.config_setup(
-+            lib,
-+            arg_list=[
-+                "sites", "1.1.1.1", "2.2.2.2", "4.4.4.4",
-+                "arbitrators", "3.3.3.3"
-+            ],
-+            modifiers={
-+                "force": False,
-+            }
-+        )
-+        lib.booth.config_setup.assert_called_once_with(
-+            {
-+                "sites": ["1.1.1.1", "2.2.2.2", "4.4.4.4"],
-+                "arbitrators": ["3.3.3.3"],
-+            },
-+            False
-+        )
-+
-+class ConfigTicketAddTest(TestCase):
-+    def test_call_lib_with_ticket_name(self):
-+        lib = mock.MagicMock()
-+        lib.booth = mock.MagicMock()
-+        lib.booth.config_ticket_add = mock.MagicMock()
-+        command.config_ticket_add(lib, arg_list=["TICKET_A"], modifiers={})
-+        lib.booth.config_ticket_add.assert_called_once_with("TICKET_A")
-diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py
-new file mode 100644
-index 0000000..1ead6f2
---- /dev/null
-+++ b/pcs/cli/booth/test/test_env.py
-@@ -0,0 +1,118 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from pcs.cli.booth.env import middleware_config
-+from pcs.common import report_codes, env_file_role_codes
-+from pcs.lib.errors import LibraryEnvError, ReportItem
-+from pcs.test.tools.pcs_mock import mock
-+
-+
-+class BoothConfTest(TestCase):
-+    @mock.patch("pcs.cli.booth.env.os.path.isfile")
-+    def test_sucessfully_care_about_local_file(self, mock_is_file):
-+        #setup, fixtures
-+        def next_in_line(env):
-+            env.booth["modified_env"] = {
-+                "config_file": {
-+                    "content": "file content",
-+                    "no_existing_file_expected": False,
-+                },
-+                "key_file": {
-+                    "content": "key file content",
-+                    "no_existing_file_expected": False,
-+                }
-+            }
-+            return "call result"
-+        mock_is_file.return_value = True
-+        mock_env = mock.MagicMock()
-+
-+        mock_open = mock.mock_open()
-+        with mock.patch(
-+            "pcs.cli.booth.env.open",
-+            mock_open,
-+            create=True
-+        ):
-+            #run tested code
-+            booth_conf_middleware = middleware_config(
-+                "booth-name",
-+                "/local/file/path.conf",
-+                "/local/file/path.key",
-+            )
-+
-+            self.assertEqual(
-+                "call result",
-+                booth_conf_middleware(next_in_line, mock_env)
-+            )
-+
-+        #assertions
-+        self.assertEqual(mock_is_file.mock_calls,[
-+            mock.call("/local/file/path.conf"),
-+            mock.call("/local/file/path.key"),
-+        ])
-+
-+        self.assertEqual(mock_env.booth["name"], "booth-name")
-+        self.assertEqual(mock_env.booth["config_file"], {"content": ""})
-+        self.assertEqual(mock_env.booth["key_file"], {"content": ""})
-+
-+        self.assertEqual(mock_open.mock_calls, [
-+            mock.call(u'/local/file/path.conf'),
-+            mock.call().read(),
-+            mock.call(u'/local/file/path.key'),
-+            mock.call().read(),
-+            mock.call(u'/local/file/path.key', u'w'),
-+            mock.call().write(u'key file content'),
-+            mock.call().close(),
-+            mock.call(u'/local/file/path.conf', u'w'),
-+            mock.call().write(u'file content'),
-+            mock.call().close(),
-+        ])
-+
-+    @mock.patch("pcs.cli.booth.env.console_report")
-+    @mock.patch("pcs.cli.booth.env.os.path.isfile")
-+    def test_catch_exactly_his_exception(
-+        self, mock_is_file, mock_console_report
-+    ):
-+        next_in_line = mock.Mock(side_effect=LibraryEnvError(
-+            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={
-+                "file_role": env_file_role_codes.BOOTH_CONFIG,
-+            }),
-+            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={
-+                "file_role": env_file_role_codes.BOOTH_KEY,
-+            }),
-+            ReportItem.error("OTHER ERROR", "", info={}),
-+        ))
-+        mock_is_file.return_value = False
-+        mock_env = mock.MagicMock()
-+
-+        #run tested code
-+        booth_conf_middleware = middleware_config(
-+            "booth-name",
-+            "/local/file/path.conf",
-+            "/local/file/path.key",
-+        )
-+        raised_exception = []
-+        def run_middleware():
-+            try:
-+                booth_conf_middleware(next_in_line, mock_env)
-+            except Exception as e:
-+                raised_exception.append(e)
-+                raise e
-+
-+        self.assertRaises(LibraryEnvError, run_middleware)
-+        self.assertEqual(1, len(raised_exception[0].unprocessed))
-+        self.assertEqual("OTHER ERROR", raised_exception[0].unprocessed[0].code)
-+
-+        self.assertEqual(mock_console_report.error.mock_calls, [
-+            mock.call(
-+                "Booth config file '/local/file/path.conf' does not exist"
-+            ),
-+            mock.call(
-+                "Booth key file '/local/file/path.key' does not exist"
-+            ),
-+        ])
-diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
-index 3d42798..e600168 100644
---- a/pcs/cli/common/console_report.py
-+++ b/pcs/cli/common/console_report.py
-@@ -8,10 +8,15 @@ from __future__ import (
- import sys
- 
- 
--def error(message, exit=True):
--    sys.stderr.write("Error: {0}\n".format(message))
--    if exit:
--        sys.exit(1)
-+def warn(message):
-+    sys.stdout.write(format_message(message, "Warning: "))
-+
-+def format_message(message, prefix):
-+    return "{0}{1}\n".format(prefix, message)
-+
-+def error(message):
-+    sys.stderr.write(format_message(message, "Error: "))
-+    return SystemExit(1)
- 
- def indent(line_list, indent_step=2):
-     """
-diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py
-index 2ba4f70..b1d951d 100644
---- a/pcs/cli/common/env.py
-+++ b/pcs/cli/common/env.py
-@@ -6,11 +6,13 @@ from __future__ import (
- )
- 
- class Env(object):
-+    #pylint: disable=too-many-instance-attributes
-     def __init__(self):
-         self.cib_data = None
-         self.cib_upgraded = False
-         self.user = None
-         self.groups = None
-         self.corosync_conf_data = None
-+        self.booth = None
-         self.auth_tokens_getter = None
-         self.debug = False
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index c4b8342..c836575 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -5,27 +5,30 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from collections import namedtuple
--from functools import partial
- import logging
-+import sys
-+from collections import namedtuple
- 
- from pcs.cli.common import middleware
--
--#from pcs.lib import commands does not work: "commands" is package
--from pcs.lib.commands.constraint import colocation as constraint_colocation
--from pcs.lib.commands.constraint import order as constraint_order
--from pcs.lib.commands.constraint import ticket as constraint_ticket
-+from pcs.cli.common.reports import (
-+    LibraryReportProcessorToConsole,
-+    process_library_reports
-+)
- from pcs.lib.commands import (
-+    booth,
-     quorum,
-     qdevice,
-     sbd,
-     alert,
- )
--from pcs.cli.common.reports import (
--    LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
-+from pcs.lib.commands.constraint import (
-+    colocation as constraint_colocation,
-+    order as constraint_order,
-+    ticket as constraint_ticket
- )
--
- from pcs.lib.env import LibraryEnvironment
-+from pcs.lib.errors import LibraryEnvError
-+
- 
- _CACHE = {}
- 
-@@ -40,7 +43,8 @@ def cli_env_to_lib_env(cli_env):
-         cli_env.groups,
-         cli_env.cib_data,
-         cli_env.corosync_conf_data,
--        cli_env.auth_tokens_getter,
-+        booth=cli_env.booth,
-+        auth_tokens_getter=cli_env.auth_tokens_getter,
-     )
- 
- def lib_env_to_cli_env(lib_env, cli_env):
-@@ -49,6 +53,19 @@ def lib_env_to_cli_env(lib_env, cli_env):
-         cli_env.cib_upgraded = lib_env.cib_upgraded
-     if not lib_env.is_corosync_conf_live:
-         cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
-+
-+    #TODO
-+    #now we know: if is in cli_env booth is in lib_env as well
-+    #when we communicate with the library over the network we will need extra
-+    #sanitization here
-+    #this applies generally, not only for booth
-+    #corosync_conf and cib suffers with this problem as well but in this cases
-+    #it is dangerously hidden: when inconsistency between cli and lib
-+    #environment inconsitency occurs, original content is put to file (which is
-+    #wrong)
-+    if cli_env.booth:
-+        cli_env.booth["modified_env"] = lib_env.booth.export()
-+
-     return cli_env
- 
- def bind(cli_env, run_with_middleware, run_library_command):
-@@ -62,7 +79,17 @@ def bind(cli_env, run_with_middleware, run_library_command):
-         lib_env_to_cli_env(lib_env, cli_env)
- 
-         return lib_call_result
--    return partial(run_with_middleware, run, cli_env)
-+
-+    def decorated_run(*args, **kwargs):
-+        try:
-+            return run_with_middleware(run, cli_env, *args, **kwargs)
-+        except LibraryEnvError as e:
-+            process_library_reports(e.unprocessed)
-+            #TODO we use explicit exit here - process_library_reports stil has
-+            #possibility to not exit - it will need deeper rethinking
-+            sys.exit(1)
-+
-+    return decorated_run
- 
- def bind_all(env, run_with_middleware, dictionary):
-     return wrapper(dict(
-@@ -172,6 +199,33 @@ def load_module(env, middleware_factory, name):
-             }
-         )
- 
-+    if name == "booth":
-+        return bind_all(
-+            env,
-+            middleware.build(
-+                middleware_factory.booth_conf,
-+                middleware_factory.cib
-+            ),
-+            {
-+                "config_setup": booth.config_setup,
-+                "config_destroy": booth.config_destroy,
-+                "config_show": booth.config_show,
-+                "config_ticket_add": booth.config_ticket_add,
-+                "config_ticket_remove": booth.config_ticket_remove,
-+                "create_in_cluster": booth.create_in_cluster,
-+                "remove_from_cluster": booth.remove_from_cluster,
-+                "config_sync": booth.config_sync,
-+                "enable": booth.enable_booth,
-+                "disable": booth.disable_booth,
-+                "start": booth.start_booth,
-+                "stop": booth.stop_booth,
-+                "pull": booth.pull_config,
-+                "status": booth.get_status,
-+                "ticket_grant": booth.ticket_grant,
-+                "ticket_revoke": booth.ticket_revoke,
-+            }
-+        )
-+
-     raise Exception("No library part '{0}'".format(name))
- 
- class Library(object):
-diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py
-index e53e138..9254a12 100644
---- a/pcs/cli/common/middleware.py
-+++ b/pcs/cli/common/middleware.py
-@@ -29,11 +29,12 @@ def cib(use_local_cib, load_cib_content, write_cib):
-     """
-     def apply(next_in_line, env, *args, **kwargs):
-         if use_local_cib:
--            env.cib_data = load_cib_content()
-+            original_content = load_cib_content()
-+            env.cib_data = original_content
- 
-         result_of_next = next_in_line(env, *args, **kwargs)
- 
--        if use_local_cib:
-+        if use_local_cib and env.cib_data != original_content:
-             write_cib(env.cib_data, env.cib_upgraded)
- 
-         return result_of_next
-@@ -45,7 +46,7 @@ def corosync_conf_existing(local_file_path):
-             try:
-                 env.corosync_conf_data = open(local_file_path).read()
-             except EnvironmentError as e:
--                console_report.error("Unable to read {0}: {1}".format(
-+                raise console_report.error("Unable to read {0}: {1}".format(
-                     local_file_path,
-                     e.strerror
-                 ))
-@@ -58,7 +59,7 @@ def corosync_conf_existing(local_file_path):
-                 f.write(env.corosync_conf_data)
-                 f.close()
-             except EnvironmentError as e:
--                console_report.error("Unable to write {0}: {1}".format(
-+                raise console_report.error("Unable to write {0}: {1}".format(
-                     local_file_path,
-                     e.strerror
-                 ))
-diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
-index 3b01775..d17c5da 100644
---- a/pcs/cli/common/parse_args.py
-+++ b/pcs/cli/common/parse_args.py
-@@ -25,3 +25,30 @@ def prepare_options(cmdline_args):
-         name, value = arg.split("=", 1)
-         options[name] = value
-     return options
-+
-+def group_by_keywords(
-+    arg_list, keyword_set,
-+    implicit_first_keyword=None, keyword_repeat_allowed=True,
-+):
-+    groups = dict([(keyword, []) for keyword in keyword_set])
-+    if implicit_first_keyword:
-+        groups[implicit_first_keyword] = []
-+
-+    if not arg_list:
-+        return groups
-+
-+    used_keywords = []
-+    if implicit_first_keyword:
-+        used_keywords.append(implicit_first_keyword)
-+    elif arg_list[0] not in keyword_set:
-+        raise CmdLineInputError()
-+
-+    for arg in arg_list:
-+        if arg in list(groups.keys()):
-+            if arg in used_keywords and not keyword_repeat_allowed:
-+                raise CmdLineInputError()
-+            used_keywords.append(arg)
-+        else:
-+            groups[used_keywords[-1]].append(arg)
-+
-+    return groups
-diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py
-index f34d2d0..c10bb62 100644
---- a/pcs/cli/common/test/test_lib_wrapper.py
-+++ b/pcs/cli/common/test/test_lib_wrapper.py
-@@ -6,8 +6,10 @@ from __future__ import (
- )
- from unittest import TestCase
- 
--from pcs.cli.common.lib_wrapper import Library
-+from pcs.cli.common.lib_wrapper import Library, bind
- from pcs.test.tools.pcs_mock import mock
-+from pcs.lib.errors import ReportItem
-+from pcs.lib.errors import LibraryEnvError
- 
- class LibraryWrapperTest(TestCase):
-     def test_raises_for_bad_path(self):
-@@ -30,6 +32,28 @@ class LibraryWrapperTest(TestCase):
-         mock_middleware_factory = mock.MagicMock()
-         mock_middleware_factory.cib = dummy_middleware
-         mock_middleware_factory.corosync_conf_existing = dummy_middleware
--        Library('env', mock_middleware_factory).constraint_order.set('first', second="third")
-+        mock_env = mock.MagicMock()
-+        Library(mock_env, mock_middleware_factory).constraint_order.set(
-+            'first', second="third"
-+        )
- 
-         mock_order_set.assert_called_once_with(lib_env, "first", second="third")
-+
-+class BindTest(TestCase):
-+    @mock.patch("pcs.cli.common.lib_wrapper.process_library_reports")
-+    def test_report_unprocessed_library_env_errors(self, mock_process_report):
-+        report1 = ReportItem.error("OTHER ERROR", "", info={})
-+        report2 = ReportItem.error("OTHER ERROR", "", info={})
-+        report3 = ReportItem.error("OTHER ERROR", "", info={})
-+        e = LibraryEnvError(report1, report2, report3)
-+        e.sign_processed(report2)
-+        mock_middleware = mock.Mock(side_effect=e)
-+
-+        binded = bind(
-+            cli_env=None,
-+            run_with_middleware=mock_middleware,
-+            run_library_command=None
-+        )
-+
-+        self.assertRaises(SystemExit, lambda: binded(cli_env=None))
-+        mock_process_report.assert_called_once_with([report1, report3])
-diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py
-index 6179882..c030cd9 100644
---- a/pcs/cli/common/test/test_middleware.py
-+++ b/pcs/cli/common/test/test_middleware.py
-@@ -6,7 +6,8 @@ from __future__ import (
- )
- 
- from unittest import TestCase
--import pcs.cli.common.middleware
-+
-+from pcs.cli.common import middleware
- 
- 
- class MiddlewareBuildTest(TestCase):
-@@ -29,7 +30,7 @@ class MiddlewareBuildTest(TestCase):
-             next(lib, argv, modificators)
-             log.append('m2 done')
- 
--        run_with_middleware = pcs.cli.common.middleware.build(m1, m2)
-+        run_with_middleware = middleware.build(m1, m2)
-         run_with_middleware(command, "1", "2", "3")
-         self.assertEqual(log, [
-             'm1 start: 1, 2, 3',
-@@ -38,3 +39,4 @@ class MiddlewareBuildTest(TestCase):
-             'm2 done',
-             'm1 done',
-         ])
-+
-diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py
-index 1d6c4b0..eb358a5 100644
---- a/pcs/cli/common/test/test_parse_args.py
-+++ b/pcs/cli/common/test/test_parse_args.py
-@@ -6,7 +6,11 @@ from __future__ import (
- )
- 
- from unittest import TestCase
--from pcs.cli.common.parse_args import split_list, prepare_options
-+from pcs.cli.common.parse_args import(
-+    split_list,
-+    prepare_options,
-+    group_by_keywords,
-+)
- from pcs.cli.common.errors import CmdLineInputError
- 
- 
-@@ -42,3 +46,81 @@ class SplitListTest(TestCase):
-             [[], ['a', 'b'], ['c', 'd'], []],
-             split_list(['|','a', 'b', '|', 'c', 'd', "|"], '|')
-         )
-+
-+class SplitByKeywords(TestCase):
-+    def test_split_with_implicit_first_keyword(self):
-+        self.assertEqual(
-+            group_by_keywords(
-+                [0, "first", 1, 2, "second", 3],
-+                set(["first", "second"]),
-+                implicit_first_keyword="zero"
-+            ),
-+            {
-+                "zero": [0],
-+                "first": [1, 2],
-+                "second": [3],
-+            }
-+        )
-+
-+    def test_splict_without_implict_keyword(self):
-+        self.assertEqual(
-+            group_by_keywords(
-+                ["first", 1, 2, "second", 3],
-+                set(["first", "second"]),
-+            ),
-+            {
-+                "first": [1, 2],
-+                "second": [3],
-+            }
-+        )
-+
-+    def test_raises_when_args_do_not_start_with_keyword_nor_implicit(self):
-+        self.assertRaises(CmdLineInputError, lambda: group_by_keywords(
-+            [0, "first", 1, 2, "second", 3],
-+            set(["first", "second"]),
-+        ))
-+
-+    def test_returns_dict_with_empty_lists_for_no_args(self):
-+        self.assertEqual(
-+            group_by_keywords(
-+                [],
-+                set(["first", "second"])
-+            ),
-+            {
-+                "first": [],
-+                "second": [],
-+            }
-+        )
-+
-+    def test_returns_dict_with_empty_lists_for_no_args_implicit_case(self):
-+        self.assertEqual(
-+            group_by_keywords(
-+                [],
-+                set(["first", "second"]),
-+                implicit_first_keyword="zero",
-+            ),
-+            {
-+                "zero": [],
-+                "first": [],
-+                "second": [],
-+            }
-+        )
-+
-+    def test_allow_keywords_repeating(self):
-+        self.assertEqual(
-+            group_by_keywords(
-+                ["first", 1, 2, "second", 3, "first", 4],
-+                set(["first", "second"]),
-+            ),
-+            {
-+                "first": [1, 2, 4],
-+                "second": [3],
-+            }
-+        )
-+
-+    def test_can_disallow_keywords_repeating(self):
-+        self.assertRaises(CmdLineInputError, lambda: group_by_keywords(
-+            ["first", 1, 2, "second", 3, "first"],
-+            set(["first", "second"]),
-+            keyword_repeat_allowed=False,
-+        ))
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 3f41d96..90fec63 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -42,6 +42,7 @@ from pcs.lib import (
-     sbd as lib_sbd,
-     reports as lib_reports,
- )
-+from pcs.lib.booth import sync as booth_sync
- from pcs.lib.commands.quorum import _add_device_model_net
- from pcs.lib.corosync import (
-     config_parser as corosync_conf_utils,
-@@ -1388,6 +1389,7 @@ def cluster_node(argv):
-         report_processor = lib_env.report_processor
-         node_communicator = lib_env.node_communicator()
-         node_addr = NodeAddresses(node0, node1)
-+        modifiers = utils.get_modificators()
-         try:
-             if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
-                 if "--watchdog" not in utils.pcs_options:
-@@ -1421,6 +1423,14 @@ def cluster_node(argv):
-                 lib_sbd.disable_sbd_service_on_node(
-                     report_processor, node_communicator, node_addr
-                 )
-+
-+            booth_sync.send_all_config_to_node(
-+                node_communicator,
-+                report_processor,
-+                node_addr,
-+                rewrite_existing=modifiers["force"],
-+                skip_wrong_config=modifiers["force"]
-+            )
-         except LibraryError as e:
-             process_library_reports(e.args)
-         except NodeCommunicationException as e:
-diff --git a/pcs/common/env_file_role_codes.py b/pcs/common/env_file_role_codes.py
-new file mode 100644
-index 0000000..1f47387
---- /dev/null
-+++ b/pcs/common/env_file_role_codes.py
-@@ -0,0 +1,9 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+BOOTH_CONFIG = "BOOTH_CONFIG"
-+BOOTH_KEY = "BOOTH_KEY"
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index 53f2ccb..e71d418 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -8,6 +8,9 @@ from __future__ import (
- # force cathegories
- FORCE_ACTIVE_RRP = "ACTIVE_RRP"
- FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE"
-+FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB"
-+FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY"
-+FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE"
- FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE"
- FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
- FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
-@@ -17,10 +20,40 @@ FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT"
- FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT"
- FORCE_METADATA_ISSUE = "METADATA_ISSUE"
- SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
-+SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG"
- 
- AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR"
- AGENT_NOT_FOUND = "AGENT_NOT_FOUND"
- BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
-+BOOTH_ADDRESS_DUPLICATION = "BOOTH_ADDRESS_DUPLICATION"
-+BOOTH_ALREADY_IN_CIB = "BOOTH_ALREADY_IN_CIB"
-+BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP = "BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP"
-+BOOTH_CANNOT_IDENTIFY_KEYFILE = "BOOTH_CANNOT_IDENTIFY_KEYFILE"
-+BOOTH_CONFIG_FILE_ALREADY_EXISTS = "BOOTH_CONFIG_FILE_ALREADY_EXISTS"
-+BOOTH_CONFIG_IO_ERROR = "BOOTH_CONFIG_IO_ERROR"
-+BOOTH_CONFIG_IS_USED = "BOOTH_CONFIG_IS_USED"
-+BOOTH_CONFIG_READ_ERROR = "BOOTH_CONFIG_READ_ERROR"
-+BOOTH_CONFIG_WRITE_ERROR = "BOOTH_CONFIG_WRITE_ERROR"
-+BOOTH_CONFIG_UNEXPECTED_LINES = "BOOTH_CONFIG_UNEXPECTED_LINES"
-+BOOTH_CONFIGS_SAVED_ON_NODE = "BOOTH_CONFIGS_SAVED_ON_NODE"
-+BOOTH_CONFIGS_SAVING_ON_NODE = "BOOTH_CONFIGS_SAVING_ON_NODE"
-+BOOTH_DAEMON_STATUS_ERROR = "BOOTH_DAEMON_STATUS_ERROR"
-+BOOTH_DISTRIBUTING_CONFIG = "BOOTH_DISTRIBUTING_CONFIG"
-+BOOTH_EVEN_PEERS_NUM = "BOOTH_EVEN_PEERS_NUM"
-+BOOTH_FETCHING_CONFIG_FROM_NODE = "BOOTH_FETCHING_CONFIG_FROM_NODE"
-+BOOTH_INVALID_CONFIG_NAME = "BOOTH_INVALID_CONFIG_NAME"
-+BOOTH_INVALID_NAME = "BOOTH_INVALID_NAME"
-+BOOTH_LACK_OF_SITES = "BOOTH_LACK_OF_SITES"
-+BOOTH_MULTIPLE_TIMES_IN_CIB = "BOOTH_MULTIPLE_TIMES_IN_CIB"
-+BOOTH_NOT_EXISTS_IN_CIB = "BOOTH_NOT_EXISTS_IN_CIB"
-+BOOTH_PEERS_STATUS_ERROR = "BOOTH_PEERS_STATUS_ERROR"
-+BOOTH_SKIPPING_CONFIG = "BOOTH_SKIPPING_CONFIG"
-+BOOTH_TICKET_DOES_NOT_EXIST = "BOOTH_TICKET_DOES_NOT_EXIST"
-+BOOTH_TICKET_DUPLICATE = "BOOTH_TICKET_DUPLICATE"
-+BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED"
-+BOOTH_TICKET_NAME_INVALID = "BOOTH_TICKET_NAME_INVALID"
-+BOOTH_TICKET_STATUS_ERROR = "BOOTH_TICKET_STATUS_ERROR"
-+BOOTH_UNSUPORTED_FILE_LOCATION = "BOOTH_UNSUPORTED_FILE_LOCATION"
- CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
- CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
- CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID"
-@@ -38,6 +71,7 @@ CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
- CMAN_UNSUPPORTED_COMMAND = "CMAN_UNSUPPORTED_COMMAND"
- COMMON_ERROR = 'COMMON_ERROR'
- COMMON_INFO = 'COMMON_INFO'
-+LIVE_ENVIRONMENT_REQUIRED = "LIVE_ENVIRONMENT_REQUIRED"
- COROSYNC_CONFIG_ACCEPTED_BY_NODE = "COROSYNC_CONFIG_ACCEPTED_BY_NODE"
- COROSYNC_CONFIG_DISTRIBUTION_STARTED = "COROSYNC_CONFIG_DISTRIBUTION_STARTED"
- COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR = "COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR"
-@@ -53,6 +87,9 @@ COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
- CRM_MON_ERROR = "CRM_MON_ERROR"
- DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
- EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
-+FILE_ALREADY_EXISTS = "FILE_ALREADY_EXISTS"
-+FILE_DOES_NOT_EXIST = "FILE_DOES_NOT_EXIST"
-+FILE_IO_ERROR = "FILE_IO_ERROR"
- ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
- ID_NOT_FOUND = 'ID_NOT_FOUND'
- IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
-@@ -134,10 +171,13 @@ SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS"
- SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR"
- SERVICE_STOP_STARTED = "SERVICE_STOP_STARTED"
- SERVICE_STOP_SUCCESS = "SERVICE_STOP_SUCCESS"
-+UNABLE_TO_DETERMINE_USER_UID = "UNABLE_TO_DETERMINE_USER_UID"
-+UNABLE_TO_DETERMINE_GROUP_GID = "UNABLE_TO_DETERMINE_GROUP_GID"
- UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA'
- UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG"
- UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG"
- UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS"
- UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
- UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT'
-+UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS"
- WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND"
-diff --git a/pcs/common/test/__init__.py b/pcs/common/test/__init__.py
-new file mode 100644
-index 0000000..e69de29
-diff --git a/pcs/common/tools.py b/pcs/common/tools.py
-index f4f6c4b..275f6b9 100644
---- a/pcs/common/tools.py
-+++ b/pcs/common/tools.py
-@@ -33,3 +33,8 @@ def run_parallel(worker, data_list):
- 
-     for thread in thread_list:
-         thread.join()
-+
-+def format_environment_error(e):
-+    if e.filename:
-+        return "{0}: '{1}'".format(e.strerror, e.filename)
-+    return e.strerror
-diff --git a/pcs/lib/booth/__init__.py b/pcs/lib/booth/__init__.py
-new file mode 100644
-index 0000000..e69de29
-diff --git a/pcs/lib/booth/config_exchange.py b/pcs/lib/booth/config_exchange.py
-new file mode 100644
-index 0000000..e0569ba
---- /dev/null
-+++ b/pcs/lib/booth/config_exchange.py
-@@ -0,0 +1,43 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+from pcs.lib.booth.config_structure import ConfigItem
-+
-+EXCHANGE_PRIMITIVES = ["authfile"]
-+EXCHANGE_LISTS = [
-+    ("site", "sites"),
-+    ("arbitrator", "arbitrators"),
-+    ("ticket", "tickets"),
-+]
-+
-+
-+def to_exchange_format(booth_configuration):
-+    exchange_lists = dict(EXCHANGE_LISTS)
-+    exchange = dict(
-+        (exchange_key, []) for exchange_key in exchange_lists.values()
-+    )
-+
-+    for key, value, _ in booth_configuration:
-+        if key in exchange_lists:
-+            exchange[exchange_lists[key]].append(value)
-+        if key in EXCHANGE_PRIMITIVES:
-+            exchange[key] = value
-+
-+    return exchange
-+
-+
-+def from_exchange_format(exchange_format):
-+    booth_config = []
-+    for key in EXCHANGE_PRIMITIVES:
-+        if key in exchange_format:
-+            booth_config.append(ConfigItem(key, exchange_format[key]))
-+
-+    for key, exchange_key in EXCHANGE_LISTS:
-+        booth_config.extend([
-+            ConfigItem(key, value)
-+            for value in exchange_format.get(exchange_key, [])
-+        ])
-+    return booth_config
-diff --git a/pcs/lib/booth/config_files.py b/pcs/lib/booth/config_files.py
-new file mode 100644
-index 0000000..aaad951
---- /dev/null
-+++ b/pcs/lib/booth/config_files.py
-@@ -0,0 +1,97 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os
-+import binascii
-+
-+from pcs.common import report_codes, env_file_role_codes as file_roles
-+from pcs.common.tools import format_environment_error
-+from pcs.lib import reports as lib_reports
-+from pcs.lib.booth import reports
-+from pcs.lib.errors import ReportItemSeverity
-+from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
-+
-+
-+def generate_key():
-+    return binascii.hexlify(os.urandom(32))
-+
-+def get_all_configs_file_names():
-+    """
-+    Returns list of all file names ending with '.conf' in booth configuration
-+    directory.
-+    """
-+    return [
-+        file_name for file_name in os.listdir(BOOTH_CONFIG_DIR)
-+        if os.path.isfile(file_name) and file_name.endswith(".conf") and
-+        len(file_name) > len(".conf")
-+    ]
-+
-+
-+def _read_config(file_name):
-+    """
-+    Read specified booth config from default booth config directory.
-+
-+    file_name -- string, name of file
-+    """
-+    with open(os.path.join(BOOTH_CONFIG_DIR, file_name), "r") as file:
-+        return file.read()
-+
-+
-+def read_configs(reporter, skip_wrong_config=False):
-+    """
-+    Returns content of all configs present on local system in dictionary,
-+    where key is name of config and value is its content.
-+
-+    reporter -- report processor
-+    skip_wrong_config -- if True skip local configs that are unreadable
-+    """
-+    report_list = []
-+    output = {}
-+    for file_name in get_all_configs_file_names():
-+        try:
-+            output[file_name] = _read_config(file_name)
-+        except EnvironmentError:
-+            report_list.append(reports.booth_config_unable_to_read(
-+                file_name,
-+                (
-+                    ReportItemSeverity.WARNING if skip_wrong_config
-+                    else ReportItemSeverity.ERROR
-+                ),
-+                (
-+                    None if skip_wrong_config
-+                    else report_codes.SKIP_UNREADABLE_CONFIG
-+                )
-+            ))
-+    reporter.process_list(report_list)
-+    return output
-+
-+
-+def read_authfile(reporter, path):
-+    """
-+    Returns content of specified authfile as bytes. None if file is not in
-+    default booth directory or there was some IO error.
-+
-+    reporter -- report processor
-+    path -- path to the authfile to be read
-+    """
-+    if not path:
-+        return None
-+    if os.path.dirname(os.path.abspath(path)) != BOOTH_CONFIG_DIR:
-+        reporter.process(reports.booth_unsupported_file_location(path))
-+        return None
-+    try:
-+        with open(path, "rb") as file:
-+            return file.read()
-+    except EnvironmentError as e:
-+        reporter.process(lib_reports.file_io_error(
-+            file_roles.BOOTH_KEY,
-+            path,
-+            reason=format_environment_error(e),
-+            operation="read",
-+            severity=ReportItemSeverity.WARNING
-+        ))
-+        return None
-diff --git a/pcs/lib/booth/config_parser.py b/pcs/lib/booth/config_parser.py
-new file mode 100644
-index 0000000..62d2203
---- /dev/null
-+++ b/pcs/lib/booth/config_parser.py
-@@ -0,0 +1,90 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import re
-+
-+from pcs.lib.booth import config_structure, reports
-+from pcs.lib.errors import LibraryError
-+
-+
-+class InvalidLines(Exception):
-+    pass
-+
-+def parse(content):
-+    try:
-+        return organize_lines(parse_to_raw_lines(content))
-+    except InvalidLines as e:
-+        raise LibraryError(
-+            reports.booth_config_unexpected_lines(e.args[0])
-+        )
-+
-+def build(config_line_list):
-+    return "\n".join(build_to_lines(config_line_list))
-+
-+def build_to_lines(config_line_list, deep=0):
-+    line_list = []
-+    for key, value, details in config_line_list:
-+        line_value = value if key != "ticket" else '"{0}"'.format(value)
-+        line_list.append("{0}{1} = {2}".format("  "*deep, key, line_value))
-+        if details:
-+            line_list.extend(build_to_lines(details, deep+1))
-+    return line_list
-+
-+
-+def organize_lines(raw_line_list):
-+    #Decision: Global key is moved up when is below ticket. Alternative is move
-+    #it below all ticket details. But it is confusing.
-+    global_section = []
-+    ticket_section = []
-+    current_ticket = None
-+    for key, value in raw_line_list:
-+        if key == "ticket":
-+            current_ticket = config_structure.ConfigItem(key, value)
-+            ticket_section.append(current_ticket)
-+        elif key in config_structure.GLOBAL_KEYS or not current_ticket:
-+            global_section.append(config_structure.ConfigItem(key, value))
-+        else:
-+            current_ticket.details.append(
-+                config_structure.ConfigItem(key, value)
-+            )
-+
-+    return global_section + ticket_section
-+
-+def search_with_multiple_re(re_object_list, string):
-+    """
-+    return MatchObject of first matching regular expression object or None
-+    list re_object_list contains regular expresssion objects (products of
-+        re.compile)
-+    """
-+    for expression in re_object_list:
-+        match = expression.search(string)
-+        if match:
-+            return match
-+    return None
-+
-+def parse_to_raw_lines(config_content):
-+    keyword_part = r"^(?P<key>[a-zA-Z0-9_-]+)\s*=\s*"
-+    expression_list = [re.compile(pattern.format(keyword_part)) for pattern in [
-+        r"""{0}(?P<value>[^'"]+)$""",
-+        r"""{0}'(?P<value>[^']*)'\s*(#.*)?$""",
-+        r"""{0}"(?P<value>[^"]*)"\s*(#.*)?$""",
-+    ]]
-+
-+    line_list = []
-+    invalid_line_list = []
-+    for line in config_content.splitlines():
-+        line = line.strip()
-+        match = search_with_multiple_re(expression_list, line)
-+        if match:
-+            line_list.append((match.group("key"), match.group("value")))
-+        elif line and not line.startswith("#"):
-+            invalid_line_list.append(line)
-+
-+    if invalid_line_list:
-+        raise InvalidLines(invalid_line_list)
-+
-+    return line_list
-diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py
-new file mode 100644
-index 0000000..c92f718
---- /dev/null
-+++ b/pcs/lib/booth/config_structure.py
-@@ -0,0 +1,111 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import re
-+
-+from pcs.lib.booth import reports
-+from pcs.lib.errors import LibraryError
-+from collections import namedtuple
-+
-+GLOBAL_KEYS = (
-+    "transport",
-+    "port",
-+    "name",
-+    "authfile",
-+    "maxtimeskew",
-+    "site",
-+    "arbitrator",
-+    "site-user",
-+    "site-group",
-+    "arbitrator-user",
-+    "arbitrator-group",
-+    "debug",
-+    "ticket",
-+)
-+TICKET_KEYS = (
-+    "acquire-after",
-+    "attr-prereq",
-+    "before-acquire-handler",
-+    "expire",
-+    "renewal-freq",
-+    "retries",
-+    "timeout",
-+    "weights",
-+)
-+
-+class ConfigItem(namedtuple("ConfigItem", "key value details")):
-+    def __new__(cls, key, value, details=None):
-+        details = details if details else []
-+        return super(ConfigItem, cls).__new__(cls, key, value, details)
-+
-+def validate_peers(site_list, arbitrator_list):
-+    report = []
-+
-+    if len(site_list) < 2:
-+        report.append(reports.booth_lack_of_sites(site_list))
-+
-+    peer_list = site_list + arbitrator_list
-+
-+    if len(peer_list) % 2 == 0:
-+        report.append(reports.booth_even_peers_num(len(peer_list)))
-+
-+    address_set = set()
-+    duplicate_addresses = set()
-+    for address in peer_list:
-+        if address in address_set:
-+            duplicate_addresses.add(address)
-+        else:
-+            address_set.add(address)
-+    if duplicate_addresses:
-+        report.append(reports.booth_address_duplication(duplicate_addresses))
-+
-+    if report:
-+        raise LibraryError(*report)
-+
-+def remove_ticket(booth_configuration, ticket_name):
-+    validate_ticket_exists(booth_configuration, ticket_name)
-+    return [
-+        config_item for config_item in booth_configuration
-+        if config_item.key != "ticket" or config_item.value != ticket_name
-+    ]
-+
-+def add_ticket(booth_configuration, ticket_name):
-+    validate_ticket_name(ticket_name)
-+    validate_ticket_unique(booth_configuration, ticket_name)
-+    return booth_configuration + [
-+        ConfigItem("ticket", ticket_name)
-+    ]
-+
-+def validate_ticket_exists(booth_configuration, ticket_name):
-+    if not ticket_exists(booth_configuration, ticket_name):
-+        raise LibraryError(reports.booth_ticket_does_not_exist(ticket_name))
-+
-+def validate_ticket_unique(booth_configuration, ticket_name):
-+    if ticket_exists(booth_configuration, ticket_name):
-+        raise LibraryError(reports.booth_ticket_duplicate(ticket_name))
-+
-+def ticket_exists(booth_configuration, ticket_name):
-+    return any(
-+        value for key, value, _ in booth_configuration
-+        if key == "ticket" and value == ticket_name
-+    )
-+
-+def validate_ticket_name(ticket_name):
-+    if not re.compile(r"^[\w-]+$").search(ticket_name):
-+        raise LibraryError(reports.booth_ticket_name_invalid(ticket_name))
-+
-+def set_authfile(booth_configuration, auth_file):
-+    return [ConfigItem("authfile", auth_file)] + [
-+        config_item for config_item in booth_configuration
-+        if config_item.key != "authfile"
-+    ]
-+
-+def get_authfile(booth_configuration):
-+    for key, value, _ in reversed(booth_configuration):
-+        if key == "authfile":
-+            return value
-+    return None
-diff --git a/pcs/lib/booth/env.py b/pcs/lib/booth/env.py
-new file mode 100644
-index 0000000..57d47aa
---- /dev/null
-+++ b/pcs/lib/booth/env.py
-@@ -0,0 +1,149 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os
-+import pwd
-+import grp
-+
-+from pcs import settings
-+from pcs.common import env_file_role_codes
-+from pcs.common.tools import format_environment_error
-+from pcs.lib import reports as common_reports
-+from pcs.lib.booth import reports
-+from pcs.lib.env_file import GhostFile, RealFile
-+from pcs.lib.errors import LibraryError
-+from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
-+
-+
-+def get_booth_env_file_name(name, extension):
-+    report_list = []
-+    if "/" in name:
-+        report_list.append(
-+            reports.booth_invalid_name(name, "contains illegal character '/'")
-+        )
-+    if report_list:
-+        raise LibraryError(*report_list)
-+    return "{0}.{1}".format(os.path.join(BOOTH_CONFIG_DIR, name), extension)
-+
-+def get_config_file_name(name):
-+    return get_booth_env_file_name(name, "conf")
-+
-+def get_key_path(name):
-+    return get_booth_env_file_name(name, "key")
-+
-+def report_keyfile_io_error(file_path, operation, e):
-+    return LibraryError(common_reports.file_io_error(
-+        file_role=env_file_role_codes.BOOTH_KEY,
-+        file_path=file_path,
-+        operation=operation,
-+        reason=format_environment_error(e)
-+    ))
-+
-+def set_keyfile_access(file_path):
-+    #shutil.chown is not in python2
-+    try:
-+        uid = pwd.getpwnam(settings.pacemaker_uname).pw_uid
-+    except KeyError:
-+        raise LibraryError(common_reports.unable_to_determine_user_uid(
-+            settings.pacemaker_uname
-+        ))
-+    try:
-+        gid = grp.getgrnam(settings.pacemaker_gname).gr_gid
-+    except KeyError:
-+        raise LibraryError(common_reports.unable_to_determine_group_gid(
-+            settings.pacemaker_gname
-+        ))
-+    try:
-+        os.chown(file_path, uid, gid)
-+    except EnvironmentError as e:
-+        raise report_keyfile_io_error(file_path, "chown", e)
-+    try:
-+        os.chmod(file_path, 0o600)
-+    except EnvironmentError as e:
-+        raise report_keyfile_io_error(file_path, "chmod", e)
-+
-+class BoothEnv(object):
-+    def __init__(self, report_processor, env_data):
-+        self.__report_processor = report_processor
-+        self.__name = env_data["name"]
-+        if "config_file" in env_data:
-+            self.__config = GhostFile(
-+                file_role=env_file_role_codes.BOOTH_CONFIG,
-+                content=env_data["config_file"]["content"]
-+            )
-+            self.__key_path = env_data["key_path"]
-+            self.__key = GhostFile(
-+                file_role=env_file_role_codes.BOOTH_KEY,
-+                content=env_data["key_file"]["content"]
-+            )
-+        else:
-+            self.__config = RealFile(
-+                file_role=env_file_role_codes.BOOTH_CONFIG,
-+                file_path=get_config_file_name(env_data["name"]),
-+            )
-+            self.__set_key_path(get_key_path(env_data["name"]))
-+
-+    def __set_key_path(self, path):
-+        self.__key_path = path
-+        self.__key = RealFile(
-+            file_role=env_file_role_codes.BOOTH_KEY,
-+            file_path=path,
-+        )
-+
-+    def command_expect_live_env(self):
-+        if not self.__config.is_live:
-+            raise LibraryError(common_reports.live_environment_required([
-+                "--booth-conf",
-+                "--booth-key",
-+            ]))
-+
-+    def set_key_path(self, path):
-+        if not self.__config.is_live:
-+            raise AssertionError(
-+                "Set path of keyfile is supported only in live environment"
-+            )
-+        self.__set_key_path(path)
-+
-+    @property
-+    def name(self):
-+        return self.__name
-+
-+    @property
-+    def key_path(self):
-+        return self.__key_path
-+
-+    def get_config_content(self):
-+        return self.__config.read()
-+
-+    def create_config(self, content, can_overwrite_existing=False):
-+        self.__config.assert_no_conflict_with_existing(
-+            self.__report_processor,
-+            can_overwrite_existing
-+        )
-+        self.__config.write(content)
-+
-+    def create_key(self, key_content, can_overwrite_existing=False):
-+        self.__key.assert_no_conflict_with_existing(
-+            self.__report_processor,
-+            can_overwrite_existing
-+        )
-+        self.__key.write(key_content, set_keyfile_access, is_binary=True)
-+
-+    def push_config(self, content):
-+        self.__config.write(content)
-+
-+    def remove_key(self):
-+        self.__key.remove(silence_no_existence=True)
-+
-+    def remove_config(self):
-+        self.__config.remove()
-+
-+    def export(self):
-+        return {} if self.__config.is_live else {
-+            "config_file": self.__config.export(),
-+            "key_file": self.__key.export(),
-+        }
-diff --git a/pcs/lib/booth/reports.py b/pcs/lib/booth/reports.py
-new file mode 100644
-index 0000000..8a804e0
---- /dev/null
-+++ b/pcs/lib/booth/reports.py
-@@ -0,0 +1,409 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from pcs.common import report_codes
-+from pcs.lib.errors import ReportItem, ReportItemSeverity
-+
-+
-+def booth_lack_of_sites(site_list):
-+    """
-+    Less than 2 booth sites entered. But it does not make sense.
-+    list site_list contains currently entered sites
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_LACK_OF_SITES,
-+        "lack of sites for booth configuration (need 2 at least):"
-+            " sites {sites_string}"
-+        ,
-+        info={
-+            "sites": site_list,
-+            "sites_string": ", ".join(site_list) if site_list else "missing",
-+        }
-+    )
-+
-+def booth_even_peers_num(number):
-+    """
-+    Booth requires odd number of peers. But even number of peers was entered.
-+    integer number determines how many peers was entered
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_EVEN_PEERS_NUM,
-+        "odd number of peers is required (entered {number} peers)",
-+        info={
-+            "number": number,
-+        }
-+    )
-+
-+def booth_address_duplication(duplicate_addresses):
-+    """
-+    Address of each peer must unique. But address duplication appeared.
-+    set duplicate_addresses contains addreses entered multiple times
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_ADDRESS_DUPLICATION,
-+        "duplicate address for booth configuration: {addresses_string}"
-+        ,
-+        info={
-+            "addresses": duplicate_addresses,
-+            "addresses_string": ", ".join(duplicate_addresses),
-+        }
-+    )
-+
-+def booth_config_unexpected_lines(line_list):
-+    """
-+    Booth config have defined structure. But line out of structure definition
-+        appeared.
-+    list line_list contains lines out of defined structure
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_CONFIG_UNEXPECTED_LINES,
-+        "unexpected line appeard in config: \n{lines_string}",
-+        info={
-+            "line_list": line_list,
-+            "lines_string": "\n".join(line_list)
-+        }
-+    )
-+
-+def booth_invalid_name(name, reason):
-+    """
-+    Booth instance name have rules. For example it cannot contain illegal
-+        characters like '/'. But some of rules was violated.
-+    string name is entered booth instance name
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_INVALID_NAME,
-+            "booth name '{name}' is not valid ({reason})"
-+        ,
-+        info={
-+            "name": name,
-+            "reason": reason,
-+        }
-+    )
-+
-+def booth_ticket_name_invalid(ticket_name):
-+    """
-+    Name of booth ticket may consists of alphanumeric characters or dash.
-+        Entered ticket name violating this rule.
-+    string ticket_name is entered booth ticket name
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_TICKET_NAME_INVALID,
-+        "booth ticket name '{ticket_name}' is not valid,"
-+            " use alphanumeric chars or dash"
-+        ,
-+        info={
-+            "ticket_name": ticket_name,
-+        }
-+    )
-+
-+def booth_ticket_duplicate(ticket_name):
-+    """
-+    Each booth ticket name must be uniqe. But duplicate booth ticket name
-+        was entered.
-+    string ticket_name is entered booth ticket name
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_TICKET_DUPLICATE,
-+        "booth ticket name '{ticket_name}' already exists in configuration",
-+        info={
-+            "ticket_name": ticket_name,
-+        }
-+    )
-+
-+def booth_ticket_does_not_exist(ticket_name):
-+    """
-+    Some operations (like ticket remove) expect the ticket name in booth
-+        configuration. But the ticket name not found in booth configuration.
-+    string ticket_name is entered booth ticket name
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_TICKET_DOES_NOT_EXIST,
-+        "booth ticket name '{ticket_name}' does not exist",
-+        info={
-+            "ticket_name": ticket_name,
-+        }
-+    )
-+
-+def booth_already_in_cib(name):
-+    """
-+    Each booth instance should be in a cib once maximally. Existence of booth
-+        instance in cib detected during creating new one.
-+    string name is booth instance name
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_ALREADY_IN_CIB,
-+        "booth instance '{name}' is already created as cluster resource",
-+        info={
-+            "name": name,
-+        }
-+    )
-+
-+def booth_not_exists_in_cib(name):
-+    """
-+    Remove booth instance from cib required. But no such instance found in cib.
-+    string name is booth instance name
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_NOT_EXISTS_IN_CIB,
-+        "booth instance '{name}' not found in cib",
-+        info={
-+            "name": name,
-+        }
-+    )
-+
-+def booth_config_is_used(name, detail=""):
-+    """
-+    Booth config use detected during destroy request.
-+    string name is booth instance name
-+    string detail provide more details (for example booth instance is used as
-+        cluster resource or is started/enabled under systemd)
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_CONFIG_IS_USED,
-+        "booth instance '{name}' is used{detail_string}",
-+        info={
-+            "name": name,
-+            "detail": detail,
-+            "detail_string": " {0}".format(detail) if detail else "",
-+        }
-+    )
-+
-+def booth_multiple_times_in_cib(
-+    name, severity=ReportItemSeverity.ERROR
-+):
-+    """
-+    Each booth instance should be in a cib once maximally. But multiple
-+        occurences detected. For example during remove booth instance from cib.
-+        Notify user about this fact is required. When operation is forced
-+        user should be notified about multiple occurences.
-+    string name is booth instance name
-+    ReportItemSeverity severit should be ERROR or WARNING (depends on context)
-+        is flag for next report processing
-+        Because of severity coupling with ReportItem is it specified here.
-+    """
-+    return ReportItem(
-+        report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB,
-+        severity,
-+        "found more than one booth instance '{name}' in cib",
-+        info={
-+            "name": name,
-+        },
-+        forceable=report_codes.FORCE_BOOTH_REMOVE_FROM_CIB
-+            if severity == ReportItemSeverity.ERROR else None
-+    )
-+
-+
-+def booth_distributing_config(name=None):
-+    """
-+    Sending booth config to all nodes in cluster.
-+
-+    name -- name of booth instance
-+    """
-+    return ReportItem.info(
-+        report_codes.BOOTH_DISTRIBUTING_CONFIG,
-+        "Sending booth config{0} to all cluster nodes.".format(
-+            " ({name})" if name and name != "booth" else ""
-+        ),
-+        info={"name": name}
-+    )
-+
-+
-+def booth_config_saved(node=None, name_list=None):
-+    """
-+    Booth config has been saved on specified node.
-+
-+    node -- name of node
-+    name_list -- list of names of booth instance
-+    """
-+    if name_list:
-+        name = ", ".join(name_list)
-+        if name == "booth":
-+            msg = "Booth config saved."
-+        else:
-+            msg = "Booth config(s) ({name}) saved."
-+    else:
-+        msg = "Booth config saved."
-+        name = None
-+    return ReportItem.info(
-+        report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+        msg if node is None else "{node}: " + msg,
-+        info={
-+            "node": node,
-+            "name": name,
-+            "name_list": name_list
-+        }
-+    )
-+
-+
-+def booth_config_unable_to_read(
-+    name, severity=ReportItemSeverity.ERROR, forceable=None
-+):
-+    """
-+    Unable to read from specified booth instance config.
-+
-+    name -- name of booth instance
-+    severity -- severity of report item
-+    forceable -- is this report item forceable? by what category?
-+    """
-+    if name and name != "booth":
-+        msg = "Unable to read booth config ({name})."
-+    else:
-+        msg = "Unable to read booth config."
-+    return ReportItem(
-+        report_codes.BOOTH_CONFIG_READ_ERROR,
-+        severity,
-+        msg,
-+        info={"name": name},
-+        forceable=forceable
-+    )
-+
-+
-+def booth_config_not_saved(node, reason, name=None):
-+    """
-+    Saving booth config failed on specified node.
-+
-+    node -- node name
-+    reason -- reason of failure
-+    name -- name of booth instance
-+    """
-+    if name and name != "booth":
-+        msg = "Unable to save booth config ({name}) on node '{node}': {reason}"
-+    else:
-+        msg = "Unable to save booth config on node '{node}': {reason}"
-+    return ReportItem.error(
-+        report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+        msg,
-+        info={
-+            "node": node,
-+            "name": name,
-+            "reason": reason
-+        }
-+    )
-+
-+
-+def booth_sending_local_configs_to_node(node):
-+    """
-+    Sending all local booth configs to node
-+
-+    node -- node name
-+    """
-+    return ReportItem.info(
-+        report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
-+        "{node}: Saving booth config(s)...",
-+        info={"node": node}
-+    )
-+
-+
-+def booth_fetching_config_from_node(node, config=None):
-+    if config or config == 'booth':
-+        msg = "Fetching booth config from node '{node}'..."
-+    else:
-+        msg = "Fetching booth config '{config}' from node '{node}'..."
-+    return ReportItem.info(
-+        report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
-+        msg,
-+        info={
-+            "node": node,
-+            "config": config,
-+        }
-+    )
-+
-+
-+def booth_unsupported_file_location(file):
-+    return ReportItem.warning(
-+        report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
-+        "skipping file {file}: unsupported file location",
-+        info={"file": file}
-+    )
-+
-+
-+def booth_daemon_status_error(reason):
-+    return ReportItem.error(
-+        report_codes.BOOTH_DAEMON_STATUS_ERROR,
-+        "unable to get status of booth daemon: {reason}",
-+        info={"reason": reason}
-+    )
-+
-+
-+def booth_tickets_status_error(reason=None):
-+    return ReportItem.error(
-+        report_codes.BOOTH_TICKET_STATUS_ERROR,
-+        "unable to get status of booth tickets",
-+        info={
-+            "reason": reason,
-+        }
-+    )
-+
-+
-+def booth_peers_status_error(reason=None):
-+    return ReportItem.error(
-+        report_codes.BOOTH_PEERS_STATUS_ERROR,
-+        "unable to get status of booth peers",
-+        info={
-+            "reason": reason,
-+        }
-+    )
-+
-+def booth_cannot_determine_local_site_ip():
-+    """
-+    Some booth operations are performed on specific site and requires to specify
-+        site ip. When site specification omitted pcs can try determine local ip.
-+        But determine local site ip failed.
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP,
-+        "cannot determine local site ip, please specify site parameter",
-+        info={}
-+    )
-+
-+def booth_ticket_operation_failed(operation, reason, site_ip, ticket_name):
-+    """
-+    Pcs uses external booth tools for some ticket_name operations. For example
-+        grand and revoke. But the external command failed.
-+    string operatin determine what was intended perform with ticket_name
-+    string reason is taken from external booth command
-+    string site_ip specifiy what site had to run the command
-+    string ticket_name specify with which ticket had to run the command
-+    """
-+    return ReportItem.error(
-+        report_codes.BOOTH_TICKET_OPERATION_FAILED,
-+        "unable to {operation} booth ticket '{ticket_name}' for site '{site_ip}', "
-+            "reason: {reason}"
-+        ,
-+        info={
-+            "operation": operation,
-+            "reason": reason,
-+            "site_ip": site_ip,
-+            "ticket_name": ticket_name,
-+        }
-+    )
-+
-+def booth_skipping_config(config_file, reason):
-+    """
-+    Warning about skipping booth config file.
-+
-+    config_file -- file name of config which is skipped
-+    reason -- reason
-+    """
-+    return ReportItem.warning(
-+        report_codes.BOOTH_SKIPPING_CONFIG,
-+        "Skipping config file '{config_file}': {reason}",
-+        info={
-+            "config_file": config_file,
-+            "reason": reason,
-+        }
-+    )
-+
-+def booth_cannot_identify_keyfile(severity=ReportItemSeverity.ERROR):
-+    return ReportItem(
-+        report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE,
-+        severity,
-+        "cannot identify authfile in booth configuration",
-+        info={},
-+        forceable=report_codes.FORCE_BOOTH_DESTROY
-+            if severity == ReportItemSeverity.ERROR else None
-+    )
-diff --git a/pcs/lib/booth/resource.py b/pcs/lib/booth/resource.py
-new file mode 100644
-index 0000000..e793713
---- /dev/null
-+++ b/pcs/lib/booth/resource.py
-@@ -0,0 +1,116 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from pcs.lib.cib.tools import find_unique_id
-+
-+
-+class BoothNotFoundInCib(Exception):
-+    pass
-+
-+class BoothMultipleOccurenceFoundInCib(Exception):
-+    pass
-+
-+def create_resource_id(resources_section, name, suffix):
-+    return find_unique_id(
-+        resources_section.getroottree(), "booth-{0}-{1}".format(name, suffix)
-+    )
-+
-+def get_creator(resource_create):
-+    #TODO resource_create  is provisional hack until resources are not moved to
-+    #lib
-+    def create_booth_in_cluster(ip, booth_config_file_path, create_id):
-+        ip_id = create_id("ip")
-+        booth_id = create_id("service")
-+        group_id = create_id("group")
-+
-+        resource_create(
-+            ra_id=ip_id,
-+            ra_type="ocf:heartbeat:IPaddr2",
-+            ra_values=["ip={0}".format(ip)],
-+            op_values=[],
-+            meta_values=[],
-+            clone_opts=[],
-+            group=group_id,
-+        )
-+        resource_create(
-+            ra_id=booth_id,
-+            ra_type="ocf:pacemaker:booth-site",
-+            ra_values=["config={0}".format(booth_config_file_path)],
-+            op_values=[],
-+            meta_values=[],
-+            clone_opts=[],
-+            group=group_id,
-+        )
-+    return create_booth_in_cluster
-+
-+def is_ip_resource(resource_element):
-+    return resource_element.attrib["type"] == "IPaddr2"
-+
-+def find_grouped_ip_element_to_remove(booth_element):
-+    if booth_element.getparent().tag != "group":
-+        return None
-+
-+    group = booth_element.getparent()
-+    if len(group) != 2:
-+        #when something else in group, ip is not for remove
-+        return None
-+    for element in group:
-+        if is_ip_resource(element):
-+            return element
-+    return None
-+
-+def get_remover(resource_remove):
-+    def remove_from_cluster(
-+        resources_section, booth_config_file_path, remove_multiple=False
-+    ):
-+        element_list = find_for_config(
-+            resources_section,
-+            booth_config_file_path
-+        )
-+        if not element_list:
-+            raise BoothNotFoundInCib()
-+
-+        if len(element_list) > 1 and not remove_multiple:
-+            raise BoothMultipleOccurenceFoundInCib()
-+
-+        number_of_removed_booth_elements = 0
-+        for element in element_list:
-+            ip_resource_to_remove = find_grouped_ip_element_to_remove(element)
-+            if ip_resource_to_remove is not None:
-+                resource_remove(ip_resource_to_remove.attrib["id"])
-+            resource_remove(element.attrib["id"])
-+            number_of_removed_booth_elements += 1
-+
-+        return number_of_removed_booth_elements
-+
-+    return remove_from_cluster
-+
-+def find_for_config(resources_section, booth_config_file_path):
-+    return resources_section.xpath(("""
-+        .//primitive[
-+            @type="booth-site"
-+            and
-+            instance_attributes[nvpair[@name="config" and @value="{0}"]]
-+        ]
-+    """).format(booth_config_file_path))
-+
-+def find_bound_ip(resources_section, booth_config_file_path):
-+    return resources_section.xpath(("""
-+        .//group[
-+            primitive[
-+                @type="booth-site"
-+                and
-+                instance_attributes[
-+                    nvpair[@name="config" and @value="{0}"]
-+                ]
-+            ]
-+        ]
-+        /primitive[@type="IPaddr2"]
-+        /instance_attributes
-+        /nvpair[@name="ip"]
-+        /@value
-+    """).format(booth_config_file_path))
-diff --git a/pcs/lib/booth/status.py b/pcs/lib/booth/status.py
-new file mode 100644
-index 0000000..4b93161
---- /dev/null
-+++ b/pcs/lib/booth/status.py
-@@ -0,0 +1,41 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from pcs import settings
-+from pcs.lib.booth import reports
-+from pcs.lib.errors import LibraryError
-+
-+
-+def get_daemon_status(runner, name=None):
-+    cmd = [settings.booth_binary, "status"]
-+    if name:
-+        cmd += ["-c", name]
-+    output, return_value = runner.run(cmd)
-+    # 7 means that there is no booth instance running
-+    if return_value not in [0, 7]:
-+        raise LibraryError(reports.booth_daemon_status_error(output))
-+    return output
-+
-+
-+def get_tickets_status(runner, name=None):
-+    cmd = [settings.booth_binary, "list"]
-+    if name:
-+        cmd += ["-c", name]
-+    output, return_value = runner.run(cmd)
-+    if return_value != 0:
-+        raise LibraryError(reports.booth_tickets_status_error(output))
-+    return output
-+
-+
-+def get_peers_status(runner, name=None):
-+    cmd = [settings.booth_binary, "peers"]
-+    if name:
-+        cmd += ["-c", name]
-+    output, return_value = runner.run(cmd)
-+    if return_value != 0:
-+        raise LibraryError(reports.booth_peers_status_error(output))
-+    return output
-diff --git a/pcs/lib/booth/sync.py b/pcs/lib/booth/sync.py
-new file mode 100644
-index 0000000..c9bc30b
---- /dev/null
-+++ b/pcs/lib/booth/sync.py
-@@ -0,0 +1,208 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os
-+import json
-+import base64
-+
-+from pcs.common import report_codes
-+from pcs.lib import reports as lib_reports
-+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
-+from pcs.lib.external import (
-+    NodeCommunicator,
-+    NodeCommunicationException,
-+    node_communicator_exception_to_report_item,
-+    parallel_nodes_communication_helper,
-+)
-+from pcs.lib.booth import (
-+    config_files as booth_conf,
-+    config_structure,
-+    config_parser,
-+    reports,
-+)
-+
-+
-+def _set_config_on_node(
-+    communicator, reporter, node, name, config_data, authfile=None,
-+    authfile_data=None
-+):
-+    """
-+    Set booth config for instance 'name' on specified node.
-+
-+    communicator -- NodeCommunicator
-+    reporter -- report processor
-+    node -- NodeAddresses
-+    name -- name of booth instance
-+    config_data -- booth config as string
-+    authfile -- path to authfile
-+    authfile_data -- authfile content as bytes
-+    """
-+    data = {
-+        "config": {
-+            "name": "{0}.conf".format(name),
-+            "data": config_data
-+        }
-+    }
-+    if authfile is not None and authfile_data is not None:
-+        data["authfile"] = {
-+            "name": os.path.basename(authfile),
-+            "data": base64.b64encode(authfile_data).decode("utf-8")
-+        }
-+    communicator.call_node(
-+        node,
-+        "remote/booth_set_config",
-+        NodeCommunicator.format_data_dict([("data_json", json.dumps(data))])
-+    )
-+    reporter.process(reports.booth_config_saved(node.label, [name]))
-+
-+
-+def send_config_to_all_nodes(
-+    communicator, reporter, node_list, name, config_data, authfile=None,
-+    authfile_data=None, skip_offline=False
-+):
-+    """
-+    Send config_data of specified booth instance from local node to all nodes in
-+    node_list.
-+
-+    communicator -- NodeCommunicator
-+    reporter -- report processor
-+    node_list -- NodeAddressesList
-+    name -- name of booth instance
-+    config_data -- config_data content as string
-+    authfile -- path to authfile
-+    authfile_data -- content of authfile as bytes
-+    skip_offline -- if True offline nodes will be skipped
-+    """
-+    reporter.process(reports.booth_distributing_config(name))
-+    parallel_nodes_communication_helper(
-+        _set_config_on_node,
-+        [
-+            (
-+                [
-+                    communicator, reporter, node, name, config_data,
-+                    authfile, authfile_data
-+                ],
-+                {}
-+            )
-+            for node in node_list
-+        ],
-+        reporter,
-+        skip_offline
-+    )
-+
-+
-+def send_all_config_to_node(
-+    communicator,
-+    reporter,
-+    node,
-+    rewrite_existing=False,
-+    skip_wrong_config=False
-+):
-+    """
-+    Send all booth configs from default booth config directory and theri
-+    authfiles to specified node.
-+
-+    communicator -- NodeCommunicator
-+    reporter -- report processor
-+    node -- NodeAddress
-+    rewrite_existing -- if True rewrite existing file
-+    skip_wrong_config -- if True skip local configs that are unreadable
-+    """
-+    config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
-+    if not config_dict:
-+        return
-+    file_list = []
-+    for config, config_data in sorted(config_dict.items()):
-+        try:
-+            authfile_path = config_structure.get_authfile(
-+                config_parser.parse(config_data)
-+            )
-+            file_list.append({
-+                "name": config,
-+                "data": config_data,
-+                "is_authfile": False
-+            })
-+            if authfile_path:
-+                content = booth_conf.read_authfile(reporter, authfile_path)
-+                if not content:
-+                    continue
-+                file_list.append({
-+                    "name": os.path.basename(authfile_path),
-+                    "data": base64.b64encode(content).decode("utf-8"),
-+                    "is_authfile": True
-+                })
-+        except LibraryError:
-+            reporter.process(reports.booth_skipping_config(
-+                config, "unable to parse config"
-+            ))
-+
-+    data = [("data_json", json.dumps(file_list))]
-+
-+    if rewrite_existing:
-+        data.append(("rewrite_existing", "1"))
-+
-+    reporter.process(reports.booth_sending_local_configs_to_node(node.label))
-+    try:
-+        response = json.loads(communicator.call_node(
-+            node,
-+            "remote/booth_save_files",
-+            NodeCommunicator.format_data_dict(data)
-+        ))
-+        report_list = []
-+        for file in response["existing"]:
-+            report_list.append(lib_reports.file_already_exists(
-+                None,
-+                file,
-+                Severities.WARNING if rewrite_existing else Severities.ERROR,
-+                (
-+                    None if rewrite_existing
-+                    else report_codes.FORCE_FILE_OVERWRITE
-+                ),
-+                node.label
-+            ))
-+        for file, reason in response["failed"].items():
-+            report_list.append(reports.booth_config_not_saved(
-+                node.label, reason, file
-+            ))
-+        reporter.process_list(report_list)
-+        reporter.process(
-+            reports.booth_config_saved(node.label, response["saved"])
-+        )
-+    except NodeCommunicationException as e:
-+        raise LibraryError(node_communicator_exception_to_report_item(e))
-+    except (KeyError, ValueError):
-+        raise LibraryError(lib_reports.invalid_response_format(node.label))
-+
-+
-+def pull_config_from_node(communicator, node, name):
-+    """
-+    Get config of specified booth instance and its authfile if there is one
-+    from 'node'. It returns dictionary with format:
-+    {
-+        "config": {
-+            "name": <file name of config>,
-+            "data": <content of file>
-+        },
-+        "authfile": {
-+            "name": <file name of authfile, None if it doesn't exist>,
-+            "data": <base64 coded content of authfile>
-+        }
-+
-+    communicator -- NodeCommunicator
-+    node -- NodeAddresses
-+    name -- name of booth instance
-+    """
-+    try:
-+        return json.loads(communicator.call_node(
-+            node,
-+            "remote/booth_get_config",
-+            NodeCommunicator.format_data_dict([("name", name)])
-+        ))
-+    except NodeCommunicationException as e:
-+        raise LibraryError(node_communicator_exception_to_report_item(e))
-+    except ValueError:
-+        raise LibraryError(lib_reports.invalid_response_format(node.label))
-diff --git a/pcs/lib/booth/test/__init__.py b/pcs/lib/booth/test/__init__.py
-new file mode 100644
-index 0000000..e69de29
-diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py
-new file mode 100644
-index 0000000..a9a40ce
---- /dev/null
-+++ b/pcs/lib/booth/test/test_config_exchange.py
-@@ -0,0 +1,70 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+from unittest import TestCase
-+from pcs.lib.booth import config_structure, config_exchange
-+
-+
-+class FromExchangeFormatTest(TestCase):
-+    def test_convert_all_supported_items(self):
-+        self.assertEqual(
-+            [
-+                config_structure.ConfigItem("authfile", "/path/to/auth.file"),
-+                config_structure.ConfigItem("site", "1.1.1.1"),
-+                config_structure.ConfigItem("site", "2.2.2.2"),
-+                config_structure.ConfigItem("arbitrator", "3.3.3.3"),
-+                config_structure.ConfigItem("ticket", "TA"),
-+                config_structure.ConfigItem("ticket", "TB"),
-+            ],
-+            config_exchange.from_exchange_format(
-+                {
-+                    "sites": ["1.1.1.1", "2.2.2.2"],
-+                    "arbitrators": ["3.3.3.3"],
-+                    "tickets": ["TA", "TB"],
-+                    "authfile": "/path/to/auth.file",
-+                },
-+            )
-+        )
-+
-+
-+class GetExchenageFormatTest(TestCase):
-+    def test_convert_parsed_config_to_exchange_format(self):
-+        self.assertEqual(
-+            {
-+                "sites": ["1.1.1.1", "2.2.2.2"],
-+                "arbitrators": ["3.3.3.3"],
-+                "tickets": ["TA", "TB"],
-+                "authfile": "/path/to/auth.file",
-+            },
-+            config_exchange.to_exchange_format([
-+                config_structure.ConfigItem("site", "1.1.1.1"),
-+                config_structure.ConfigItem("site", "2.2.2.2"),
-+                config_structure.ConfigItem("arbitrator", "3.3.3.3"),
-+                config_structure.ConfigItem("authfile", "/path/to/auth.file"),
-+                config_structure.ConfigItem("ticket", "TA"),
-+                config_structure.ConfigItem("ticket", "TB", [
-+                    config_structure.ConfigItem("timeout", "10")
-+                ]),
-+            ])
-+        )
-+
-+    def test_convert_parsed_config_to_exchange_format_without_authfile(self):
-+        self.assertEqual(
-+            {
-+                "sites": ["1.1.1.1", "2.2.2.2"],
-+                "arbitrators": ["3.3.3.3"],
-+                "tickets": ["TA", "TB"],
-+            },
-+            config_exchange.to_exchange_format([
-+                config_structure.ConfigItem("site", "1.1.1.1"),
-+                config_structure.ConfigItem("site", "2.2.2.2"),
-+                config_structure.ConfigItem("arbitrator", "3.3.3.3"),
-+                config_structure.ConfigItem("ticket", "TA"),
-+                config_structure.ConfigItem("ticket", "TB", [
-+                    config_structure.ConfigItem("timeout", "10")
-+                ]),
-+            ])
-+        )
-diff --git a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py
-new file mode 100644
-index 0000000..2d4c3ea
---- /dev/null
-+++ b/pcs/lib/booth/test/test_config_files.py
-@@ -0,0 +1,272 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from os.path import join
-+from unittest import TestCase
-+
-+from pcs.common import report_codes, env_file_role_codes as file_roles
-+from pcs.lib.booth import config_files
-+from pcs.lib.errors import ReportItemSeverity as severities
-+from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
-+from pcs.test.tools.assertions import assert_raise_library_error, assert_report_item_list_equal
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-+from pcs.test.tools.pcs_mock import mock
-+
-+def patch_config_files(target, *args, **kwargs):
-+    return mock.patch(
-+        "pcs.lib.booth.config_files.{0}".format(target), *args, **kwargs
-+    )
-+
-+@mock.patch("os.listdir")
-+@mock.patch("os.path.isfile")
-+class GetAllConfigsFileNamesTest(TestCase):
-+    def test_success(self, mock_is_file, mock_listdir):
-+        def mock_is_file_fn(file_name):
-+            if file_name in ["dir.cong", "dir"]:
-+                return False
-+            elif file_name in [
-+                "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf"
-+            ]:
-+                return True
-+            else:
-+                raise AssertionError("unexpected input")
-+
-+        mock_is_file.side_effect = mock_is_file_fn
-+        mock_listdir.return_value = [
-+            "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf",
-+            "dir.cong", "dir"
-+        ]
-+        self.assertEqual(
-+            ["name2.conf", "name.conf.conf", "name3.conf"],
-+            config_files.get_all_configs_file_names()
-+        )
-+        mock_listdir.assert_called_once_with(BOOTH_CONFIG_DIR)
-+
-+
-+class ReadConfigTest(TestCase):
-+    def test_success(self):
-+        self.maxDiff = None
-+        mock_open = mock.mock_open(read_data="config content")
-+        with patch_config_files("open", mock_open, create=True):
-+            self.assertEqual(
-+                "config content",
-+                config_files._read_config("my-file.conf")
-+            )
-+
-+        self.assertEqual(
-+            [
-+                mock.call(join(BOOTH_CONFIG_DIR, "my-file.conf"), "r"),
-+                mock.call().__enter__(),
-+                mock.call().read(),
-+                mock.call().__exit__(None, None, None)
-+            ],
-+            mock_open.mock_calls
-+        )
-+
-+
-+@patch_config_files("_read_config")
-+@patch_config_files("get_all_configs_file_names")
-+class ReadConfigsTest(TestCase):
-+    def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-+
-+    def test_success(self, mock_get_configs, mock_read):
-+        def _mock_read_cfg(file):
-+            if file == "name1.conf":
-+                return "config1"
-+            elif file == "name2.conf":
-+                return "config2"
-+            elif file == "name3.conf":
-+                return "config3"
-+            else:
-+                raise AssertionError("unexpected input: {0}".format(file))
-+        mock_get_configs.return_value = [
-+            "name1.conf", "name2.conf", "name3.conf"
-+        ]
-+        mock_read.side_effect = _mock_read_cfg
-+
-+        self.assertEqual(
-+            {
-+                "name1.conf": "config1",
-+                "name2.conf": "config2",
-+                "name3.conf": "config3"
-+            },
-+            config_files.read_configs(self.mock_reporter)
-+        )
-+
-+        mock_get_configs.assert_called_once_with()
-+        self.assertEqual(3, mock_read.call_count)
-+        mock_read.assert_has_calls([
-+            mock.call("name1.conf"),
-+            mock.call("name2.conf"),
-+            mock.call("name3.conf")
-+        ])
-+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
-+
-+    def test_skip_failed(self, mock_get_configs, mock_read):
-+        def _mock_read_cfg(file):
-+            if file in ["name1.conf", "name3.conf"]:
-+                raise EnvironmentError()
-+            elif file == "name2.conf":
-+                return "config2"
-+            else:
-+                raise AssertionError("unexpected input: {0}".format(file))
-+
-+        mock_get_configs.return_value = [
-+            "name1.conf", "name2.conf", "name3.conf"
-+        ]
-+        mock_read.side_effect = _mock_read_cfg
-+
-+        self.assertEqual(
-+            {"name2.conf": "config2"},
-+            config_files.read_configs(self.mock_reporter, True)
-+        )
-+        mock_get_configs.assert_called_once_with()
-+        self.assertEqual(3, mock_read.call_count)
-+        mock_read.assert_has_calls([
-+            mock.call("name1.conf"),
-+            mock.call("name2.conf"),
-+            mock.call("name3.conf")
-+        ])
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    severities.WARNING,
-+                    report_codes.BOOTH_CONFIG_READ_ERROR,
-+                    {"name": "name1.conf"}
-+                ),
-+                (
-+                    severities.WARNING,
-+                    report_codes.BOOTH_CONFIG_READ_ERROR,
-+                    {"name": "name3.conf"}
-+                )
-+            ]
-+        )
-+
-+    def test_do_not_skip_failed(self, mock_get_configs, mock_read):
-+        def _mock_read_cfg(file):
-+            if file in ["name1.conf", "name3.conf"]:
-+                raise EnvironmentError()
-+            elif file == "name2.conf":
-+                return "config2"
-+            else:
-+                raise AssertionError("unexpected input: {0}".format(file))
-+
-+        mock_get_configs.return_value = [
-+            "name1.conf", "name2.conf", "name3.conf"
-+        ]
-+        mock_read.side_effect = _mock_read_cfg
-+
-+        assert_raise_library_error(
-+            lambda: config_files.read_configs(self.mock_reporter),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_CONFIG_READ_ERROR,
-+                {"name": "name1.conf"},
-+                report_codes.SKIP_UNREADABLE_CONFIG
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_CONFIG_READ_ERROR,
-+                {"name": "name3.conf"},
-+                report_codes.SKIP_UNREADABLE_CONFIG
-+            )
-+        )
-+        mock_get_configs.assert_called_once_with()
-+        self.assertEqual(3, mock_read.call_count)
-+        mock_read.assert_has_calls([
-+            mock.call("name1.conf"),
-+            mock.call("name2.conf"),
-+            mock.call("name3.conf")
-+        ])
-+        self.assertEqual(2, len(self.mock_reporter.report_item_list))
-+
-+
-+class ReadAuthfileTest(TestCase):
-+    def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.maxDiff = None
-+
-+    def test_success(self):
-+        path = join(BOOTH_CONFIG_DIR, "file.key")
-+        mock_open = mock.mock_open(read_data="key")
-+
-+        with patch_config_files("open", mock_open, create=True):
-+            self.assertEqual(
-+                "key", config_files.read_authfile(self.mock_reporter, path)
-+            )
-+
-+        self.assertEqual(
-+            [
-+                mock.call(path, "rb"),
-+                mock.call().__enter__(),
-+                mock.call().read(),
-+                mock.call().__exit__(None, None, None)
-+            ],
-+            mock_open.mock_calls
-+        )
-+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
-+
-+    def test_path_none(self):
-+        self.assertTrue(
-+            config_files.read_authfile(self.mock_reporter, None) is None
-+        )
-+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
-+
-+    def test_invalid_path(self):
-+        path = "/not/etc/booth/booth.key"
-+        self.assertTrue(
-+            config_files.read_authfile(self.mock_reporter, path) is None
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                severities.WARNING,
-+                report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
-+                {"file": path}
-+            )]
-+        )
-+
-+    def test_not_abs_path(self):
-+        path = "/etc/booth/../booth.key"
-+        self.assertTrue(
-+            config_files.read_authfile(self.mock_reporter, path) is None
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                severities.WARNING,
-+                report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
-+                {"file": path}
-+            )]
-+        )
-+
-+    @patch_config_files("format_environment_error", return_value="reason")
-+    def test_read_failure(self, _):
-+        path = join(BOOTH_CONFIG_DIR, "file.key")
-+        mock_open = mock.mock_open()
-+        mock_open().read.side_effect = EnvironmentError()
-+
-+        with patch_config_files("open", mock_open, create=True):
-+            return_value = config_files.read_authfile(self.mock_reporter, path)
-+
-+        self.assertTrue(return_value is None)
-+
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                severities.WARNING,
-+                report_codes.FILE_IO_ERROR,
-+                {
-+                    "file_role": file_roles.BOOTH_KEY,
-+                    "file_path": path,
-+                    "reason": "reason",
-+                    "operation": "read",
-+                }
-+            )]
-+        )
-diff --git a/pcs/lib/booth/test/test_config_parser.py b/pcs/lib/booth/test/test_config_parser.py
-new file mode 100644
-index 0000000..684fc79
---- /dev/null
-+++ b/pcs/lib/booth/test/test_config_parser.py
-@@ -0,0 +1,169 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from pcs.common import report_codes
-+from pcs.lib.booth import config_parser
-+from pcs.lib.booth.config_structure import ConfigItem
-+from pcs.lib.errors import ReportItemSeverity as severities
-+from pcs.test.tools.assertions import assert_raise_library_error
-+from pcs.test.tools.pcs_unittest import TestCase
-+
-+
-+class BuildTest(TestCase):
-+    def test_build_file_content_from_parsed_structure(self):
-+        self.assertEqual(
-+            "\n".join([
-+                "authfile = /path/to/auth.file",
-+                "site = 1.1.1.1",
-+                "site = 2.2.2.2",
-+                "arbitrator = 3.3.3.3",
-+                'ticket = "TA"',
-+                'ticket = "TB"',
-+                "  timeout = 10",
-+            ]),
-+            config_parser.build([
-+                ConfigItem("authfile", "/path/to/auth.file"),
-+                ConfigItem("site", "1.1.1.1"),
-+                ConfigItem("site", "2.2.2.2"),
-+                ConfigItem("arbitrator", "3.3.3.3"),
-+                ConfigItem("ticket", "TA"),
-+                ConfigItem("ticket", "TB", [
-+                    ConfigItem("timeout", "10")
-+                ]),
-+            ])
-+        )
-+
-+
-+class OrganizeLinesTest(TestCase):
-+    def test_move_non_ticket_config_keys_above_tickets(self):
-+        self.assertEqual(
-+            [
-+                ConfigItem("site", "1.1.1.1"),
-+                ConfigItem('site', '2.2.2.2'),
-+                ConfigItem('arbitrator', '3.3.3.3'),
-+                ConfigItem("ticket", "TA"),
-+            ],
-+            config_parser.organize_lines([
-+                ("site", "1.1.1.1"),
-+                ("ticket", "TA"),
-+                ('site', '2.2.2.2'),
-+                ('arbitrator', '3.3.3.3'),
-+            ])
-+        )
-+
-+    def test_use_ticket_key_as_ticket_detail(self):
-+        self.maxDiff = None
-+        self.assertEqual(
-+            [
-+                ConfigItem("site", "1.1.1.1"),
-+                ConfigItem('expire', '300'),
-+                ConfigItem('site', '2.2.2.2'),
-+                ConfigItem('arbitrator', '3.3.3.3'),
-+                ConfigItem("ticket", "TA", [
-+                    ConfigItem("timeout", "10"),
-+                    ConfigItem('--nonexistent', 'value'),
-+                    ConfigItem("expire", "300"),
-+                ]),
-+                ConfigItem("ticket", "TB", [
-+                    ConfigItem("timeout", "20"),
-+                    ConfigItem("renewal-freq", "40"),
-+                ]),
-+            ],
-+            config_parser.organize_lines([
-+                ("site", "1.1.1.1"),
-+                ("expire", "300"), # out of ticket content is kept global
-+                ("ticket", "TA"),
-+                ("site", "2.2.2.2"), # move to global
-+                ("timeout", "10"),
-+                ("--nonexistent", "value"), # no global is kept under ticket
-+                ("expire", "300"),
-+                ("ticket", "TB"),
-+                ('arbitrator', '3.3.3.3'),
-+                ("timeout", "20"),
-+                ("renewal-freq", "40"),
-+            ])
-+        )
-+
-+
-+class ParseRawLinesTest(TestCase):
-+    def test_parse_simple_correct_lines(self):
-+        self.assertEqual(
-+            [
-+                ("site", "1.1.1.1"),
-+                ('site', '2.2.2.2'),
-+                ('arbitrator', '3.3.3.3'),
-+                ('syntactically_correct', 'nonsense'),
-+                ('line-with', 'hash#literal'),
-+            ],
-+            config_parser.parse_to_raw_lines("\n".join([
-+                "site = 1.1.1.1",
-+                " site  =  2.2.2.2 ",
-+                "arbitrator=3.3.3.3",
-+                "syntactically_correct = nonsense",
-+                "line-with = hash#literal",
-+            ]))
-+        )
-+
-+    def test_parse_lines_with_whole_line_comment(self):
-+        self.assertEqual(
-+            [("site", "1.1.1.1")],
-+            config_parser.parse_to_raw_lines("\n".join([
-+                " # some comment",
-+                "site = 1.1.1.1",
-+            ]))
-+       )
-+
-+    def test_skip_empty_lines(self):
-+        self.assertEqual(
-+            [("site", "1.1.1.1")],
-+            config_parser.parse_to_raw_lines("\n".join([
-+                " ",
-+                "site = 1.1.1.1",
-+            ]))
-+       )
-+
-+    def test_raises_when_unexpected_lines_appear(self):
-+        invalid_line_list = [
-+            "first invalid line",
-+            "second = 'invalid line' something else #comment",
-+            "third = 'invalid line 'something#'#",
-+        ]
-+        line_list = ["site = 1.1.1.1"] + invalid_line_list
-+        with self.assertRaises(config_parser.InvalidLines) as context_manager:
-+            config_parser.parse_to_raw_lines("\n".join(line_list))
-+        self.assertEqual(context_manager.exception.args[0], invalid_line_list)
-+
-+    def test_parse_lines_finishing_with_comment(self):
-+        self.assertEqual(
-+            [("site", "1.1.1.1")],
-+            config_parser.parse_to_raw_lines("\n".join([
-+                "site = '1.1.1.1' #comment",
-+            ]))
-+       )
-+
-+class ParseTest(TestCase):
-+    def test_raises_when_invalid_lines_appear(self):
-+        invalid_line_list = [
-+            "first invalid line",
-+            "second = 'invalid line' something else #comment"
-+        ]
-+        line_list = ["site = 1.1.1.1"] + invalid_line_list
-+        assert_raise_library_error(
-+            lambda:
-+                config_parser.parse("\n".join(line_list))
-+            ,
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_CONFIG_UNEXPECTED_LINES,
-+                {
-+                    "line_list": invalid_line_list,
-+                },
-+            ),
-+        )
-+
-+    def test_do_not_raises_when_no_invalid_liens_there(self):
-+        config_parser.parse("site = 1.1.1.1")
-diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
-new file mode 100644
-index 0000000..27faca5
---- /dev/null
-+++ b/pcs/lib/booth/test/test_config_structure.py
-@@ -0,0 +1,224 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from pcs.common import report_codes
-+from pcs.lib.booth import config_structure
-+from pcs.lib.errors import ReportItemSeverity as severities
-+from pcs.test.tools.assertions import assert_raise_library_error
-+from pcs.test.tools.pcs_mock import mock
-+
-+
-+class ValidateTicketExistsTest(TestCase):
-+    def test_raises_on_duplicate_ticket(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_ticket_exists(
-+                [config_structure.ConfigItem("ticket", "B")], "A"
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_TICKET_DOES_NOT_EXIST,
-+                {
-+                    "ticket_name": "A",
-+                },
-+            ),
-+        )
-+
-+class ValidateTicketUniqueTest(TestCase):
-+    def test_raises_on_duplicate_ticket(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_ticket_unique(
-+                [config_structure.ConfigItem("ticket", "A")], "A"
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_TICKET_DUPLICATE,
-+                {
-+                    "ticket_name": "A",
-+                },
-+            ),
-+        )
-+
-+    def test_do_not_raises_when_no_duplicated_ticket(self):
-+        config_structure.validate_ticket_unique([], "A")
-+
-+class TicketExistsTest(TestCase):
-+    def test_returns_true_if_ticket_in_structure(self):
-+        self.assertTrue(config_structure.ticket_exists(
-+            [config_structure.ConfigItem("ticket", "A")], "A"
-+        ))
-+
-+    def test_returns_false_if_ticket_in_structure(self):
-+        self.assertFalse(config_structure.ticket_exists(
-+            [config_structure.ConfigItem("ticket", "A")], "B"
-+        ))
-+
-+class ValidateTicketNameTest(TestCase):
-+    def test_accept_valid_ticket_name(self):
-+        config_structure.validate_ticket_name("abc")
-+
-+    def test_refuse_bad_ticket_name(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_ticket_name("@ticket"),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_TICKET_NAME_INVALID,
-+                {
-+                    "ticket_name": "@ticket",
-+                },
-+            ),
-+        )
-+
-+class ValidatePeersTest(TestCase):
-+    def test_do_no_raises_on_correct_args(self):
-+        config_structure.validate_peers(
-+            site_list=["1.1.1.1", "2.2.2.2"],
-+            arbitrator_list=["3.3.3.3"]
-+        )
-+
-+    def test_refuse_less_than_2_sites(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_peers(
-+                site_list=["1.1.1.1"],
-+                arbitrator_list=["3.3.3.3", "4.4.4.4"]
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_LACK_OF_SITES,
-+                {
-+                    "sites": ["1.1.1.1"],
-+                }
-+            ),
-+        )
-+
-+    def test_refuse_even_number_peers(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_peers(
-+                site_list=["1.1.1.1", "2.2.2.2"],
-+                arbitrator_list=[]
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_EVEN_PEERS_NUM,
-+                {
-+                    "number": 2,
-+                }
-+            ),
-+        )
-+
-+    def test_refuse_address_duplication(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_peers(
-+                site_list=["1.1.1.1", "1.1.1.1", "1.1.1.1"],
-+                arbitrator_list=["3.3.3.3", "4.4.4.4"]
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_ADDRESS_DUPLICATION,
-+                {
-+                    "addresses": set(["1.1.1.1"]),
-+                }
-+            ),
-+        )
-+
-+    def test_refuse_problem_combination(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_peers(
-+                site_list=["1.1.1.1"],
-+                arbitrator_list=["1.1.1.1"]
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_LACK_OF_SITES,
-+                {
-+                    "sites": ["1.1.1.1"],
-+                }
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_EVEN_PEERS_NUM,
-+                {
-+                    "number": 2,
-+                }
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_ADDRESS_DUPLICATION,
-+                {
-+                    "addresses": set(["1.1.1.1"]),
-+                }
-+            ),
-+        )
-+
-+class RemoveTicketTest(TestCase):
-+    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_exists")
-+    def test_successfully_remove_ticket(self, mock_validate_ticket_exists):
-+        configuration = [
-+            config_structure.ConfigItem("ticket", "some-ticket"),
-+            config_structure.ConfigItem("ticket", "deprecated-ticket"),
-+        ]
-+        self.assertEqual(
-+            config_structure.remove_ticket(configuration, "deprecated-ticket"),
-+            [
-+                config_structure.ConfigItem("ticket", "some-ticket"),
-+            ]
-+        )
-+        mock_validate_ticket_exists.assert_called_once_with(
-+            configuration,
-+            "deprecated-ticket"
-+        )
-+
-+class AddTicketTest(TestCase):
-+    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_unique")
-+    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_name")
-+    def test_successfully_add_ticket(
-+        self, mock_validate_name, mock_validate_uniq
-+    ):
-+        configuration = [
-+            config_structure.ConfigItem("ticket", "some-ticket"),
-+        ]
-+        self.assertEqual(
-+            config_structure.add_ticket(configuration, "new-ticket"),
-+            [
-+                config_structure.ConfigItem("ticket", "some-ticket"),
-+                config_structure.ConfigItem("ticket", "new-ticket"),
-+            ],
-+        )
-+
-+        mock_validate_name.assert_called_once_with("new-ticket")
-+        mock_validate_uniq.assert_called_once_with(configuration, "new-ticket")
-+
-+class SetAuthfileTest(TestCase):
-+    def test_add_authfile(self):
-+        self.assertEqual(
-+            [
-+                config_structure.ConfigItem("authfile", "/path/to/auth.file"),
-+                config_structure.ConfigItem("site", "1.1.1.1"),
-+            ],
-+            config_structure.set_authfile(
-+                [
-+                    config_structure.ConfigItem("site", "1.1.1.1"),
-+                ],
-+                "/path/to/auth.file"
-+            )
-+        )
-+    def test_reset_authfile(self):
-+        self.assertEqual(
-+            [
-+                config_structure.ConfigItem("authfile", "/path/to/auth.file"),
-+                config_structure.ConfigItem("site", "1.1.1.1"),
-+            ],
-+            config_structure.set_authfile(
-+                [
-+                    config_structure.ConfigItem("site", "1.1.1.1"),
-+                    config_structure.ConfigItem("authfile", "/old/path/to/auth1.file"),
-+                    config_structure.ConfigItem("authfile", "/old/path/to/auth2.file"),
-+                ],
-+                "/path/to/auth.file"
-+            )
-+        )
-diff --git a/pcs/lib/booth/test/test_env.py b/pcs/lib/booth/test/test_env.py
-new file mode 100644
-index 0000000..77e0944
---- /dev/null
-+++ b/pcs/lib/booth/test/test_env.py
-@@ -0,0 +1,228 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import grp
-+import os
-+import pwd
-+from unittest import TestCase
-+
-+from pcs import settings
-+from pcs.common import report_codes
-+from pcs.lib.booth import env
-+from pcs.lib.errors import ReportItemSeverity as severities
-+from pcs.test.tools.assertions import assert_raise_library_error
-+from pcs.test.tools.misc import get_test_resource as rc
-+from pcs.test.tools.pcs_mock import mock
-+
-+def patch_env(target, *args, **kwargs):
-+    return mock.patch(
-+        "pcs.lib.booth.env.{0}".format(target), *args, **kwargs
-+    )
-+
-+class GetConfigFileNameTest(TestCase):
-+    @patch_env("os.path.exists")
-+    def test_refuse_when_name_starts_with_slash(self, mock_path_exists):
-+        mock_path_exists.return_value = True
-+        assert_raise_library_error(
-+            lambda: env.get_config_file_name("/booth"),
-+            (
-+                severities.ERROR,
-+                report_codes.BOOTH_INVALID_NAME,
-+                {
-+                    "name": "/booth",
-+                    "reason": "contains illegal character '/'",
-+                }
-+            ),
-+        )
-+
-+class BoothEnvTest(TestCase):
-+    @patch_env("RealFile")
-+    def test_get_content_from_file(self, mock_real_file):
-+        mock_real_file.return_value = mock.MagicMock(
-+            read=mock.MagicMock(return_value="content")
-+        )
-+        self.assertEqual(
-+            "content",
-+            env.BoothEnv("report processor", env_data={"name": "booth"})
-+                .get_config_content()
-+        )
-+
-+    @patch_env("set_keyfile_access")
-+    @patch_env("RealFile")
-+    def test_create_config(self, mock_real_file, mock_set_keyfile_access):
-+        mock_file = mock.MagicMock(
-+            assert_no_conflict_with_existing=mock.MagicMock(),
-+            write=mock.MagicMock(),
-+        )
-+        mock_real_file.return_value = mock_file
-+
-+
-+        env.BoothEnv(
-+            "report processor",
-+            env_data={"name": "booth"}
-+        ).create_config("a", can_overwrite_existing=True)
-+
-+        self.assertEqual(mock_file.assert_no_conflict_with_existing.mock_calls,[
-+            mock.call('report processor', True),
-+        ])
-+        self.assertEqual(mock_file.write.mock_calls, [mock.call('a')])
-+
-+    @patch_env("RealFile")
-+    def test_push_config(self, mock_real_file):
-+        mock_file = mock.MagicMock(
-+            assert_no_conflict_with_existing=mock.MagicMock(),
-+            write=mock.MagicMock(),
-+        )
-+        mock_real_file.return_value = mock_file
-+        env.BoothEnv(
-+            "report processor",
-+            env_data={"name": "booth"}
-+        ).push_config("a")
-+        mock_file.write.assert_called_once_with("a")
-+
-+
-+
-+    def test_export_config_file_when_was_present_in_env_data(self):
-+        self.assertEqual(
-+            env.BoothEnv(
-+                "report processor",
-+                {
-+                    "name": "booth-name",
-+                    "config_file": {
-+                        "content": "a\nb",
-+                    },
-+                    "key_file": {
-+                        "content": "secure",
-+                    },
-+                    "key_path": "/path/to/file.key",
-+                }
-+            ).export(),
-+            {
-+                "config_file": {
-+                    "content": "a\nb",
-+                    "can_overwrite_existing_file": False,
-+                    "no_existing_file_expected": False,
-+                    "is_binary": False,
-+                },
-+                "key_file": {
-+                    "content": "secure",
-+                    "can_overwrite_existing_file": False,
-+                    "no_existing_file_expected": False,
-+                    "is_binary": False,
-+                },
-+            }
-+        )
-+
-+    def test_do_not_export_config_file_when_no_provided(self):
-+        self.assertEqual(
-+            env.BoothEnv("report processor", {"name": "booth"}).export(),
-+            {}
-+        )
-+
-+class SetKeyfileAccessTest(TestCase):
-+    def test_set_desired_file_access(self):
-+        #setup
-+        file_path = rc("temp-keyfile")
-+        if os.path.exists(file_path):
-+            os.remove(file_path)
-+        with open(file_path, "w") as file:
-+            file.write("content")
-+
-+        #check assumptions
-+        stat = os.stat(file_path)
-+        self.assertNotEqual('600', oct(stat.st_mode)[-3:])
-+        current_user = pwd.getpwuid(os.getuid())[0]
-+        if current_user != settings.pacemaker_uname:
-+            file_user = pwd.getpwuid(stat.st_uid)[0]
-+            self.assertNotEqual(file_user, settings.pacemaker_uname)
-+        current_group = grp.getgrgid(os.getgid())[0]
-+        if current_group != settings.pacemaker_gname:
-+            file_group = grp.getgrgid(stat.st_gid)[0]
-+            self.assertNotEqual(file_group, settings.pacemaker_gname)
-+
-+        #run tested method
-+        env.set_keyfile_access(file_path)
-+
-+        #check
-+        stat = os.stat(file_path)
-+        self.assertEqual('600', oct(stat.st_mode)[-3:])
-+
-+        file_user = pwd.getpwuid(stat.st_uid)[0]
-+        self.assertEqual(file_user, settings.pacemaker_uname)
-+
-+        file_group = grp.getgrgid(stat.st_gid)[0]
-+        self.assertEqual(file_group, settings.pacemaker_gname)
-+
-+    @patch_env("pwd.getpwnam", mock.MagicMock(side_effect=KeyError))
-+    @patch_env("settings.pacemaker_uname", "some-user")
-+    def test_raises_when_cannot_get_uid(self):
-+        assert_raise_library_error(
-+            lambda: env.set_keyfile_access("/booth"),
-+            (
-+                severities.ERROR,
-+                report_codes.UNABLE_TO_DETERMINE_USER_UID,
-+                {
-+                    "user": "some-user",
-+                }
-+            ),
-+        )
-+
-+    @patch_env("grp.getgrnam", mock.MagicMock(side_effect=KeyError))
-+    @patch_env("pwd.getpwnam", mock.MagicMock())
-+    @patch_env("settings.pacemaker_gname", "some-group")
-+    def test_raises_when_cannot_get_gid(self):
-+        assert_raise_library_error(
-+            lambda: env.set_keyfile_access("/booth"),
-+            (
-+                severities.ERROR,
-+                report_codes.UNABLE_TO_DETERMINE_GROUP_GID,
-+                {
-+                    "group": "some-group",
-+                }
-+            ),
-+        )
-+
-+    @patch_env("format_environment_error", mock.Mock(return_value="err"))
-+    @patch_env("os.chown", mock.MagicMock(side_effect=EnvironmentError()))
-+    @patch_env("grp.getgrnam", mock.MagicMock())
-+    @patch_env("pwd.getpwnam", mock.MagicMock())
-+    @patch_env("settings.pacemaker_gname", "some-group")
-+    def test_raises_when_cannot_chown(self):
-+        assert_raise_library_error(
-+            lambda: env.set_keyfile_access("/booth"),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_IO_ERROR,
-+                {
-+                    'reason': 'err',
-+                    'file_role': u'BOOTH_KEY',
-+                    'file_path': '/booth',
-+                    'operation': u'chown',
-+                }
-+            ),
-+        )
-+
-+    @patch_env("format_environment_error", mock.Mock(return_value="err"))
-+    @patch_env("os.chmod", mock.MagicMock(side_effect=EnvironmentError()))
-+    @patch_env("os.chown", mock.MagicMock())
-+    @patch_env("grp.getgrnam", mock.MagicMock())
-+    @patch_env("pwd.getpwnam", mock.MagicMock())
-+    @patch_env("settings.pacemaker_gname", "some-group")
-+    def test_raises_when_cannot_chmod(self):
-+        assert_raise_library_error(
-+            lambda: env.set_keyfile_access("/booth"),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_IO_ERROR,
-+                {
-+                    'reason': 'err',
-+                    'file_role': u'BOOTH_KEY',
-+                    'file_path': '/booth',
-+                    'operation': u'chmod',
-+                }
-+            ),
-+        )
-diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py
-new file mode 100644
-index 0000000..440ddde
---- /dev/null
-+++ b/pcs/lib/booth/test/test_resource.py
-@@ -0,0 +1,203 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from lxml import etree
-+
-+import pcs.lib.booth.resource as booth_resource
-+from pcs.test.tools.pcs_mock import mock
-+
-+
-+def fixture_resources_with_booth(booth_config_file_path):
-+    return etree.fromstring('''
-+        <resources>
-+            <primitive type="booth-site">
-+                <instance_attributes>
-+                    <nvpair name="config" value="{0}"/>
-+                </instance_attributes>
-+            </primitive>
-+        </resources>
-+    '''.format(booth_config_file_path))
-+
-+def fixture_booth_element(id, booth_config_file_path):
-+    return etree.fromstring('''
-+        <primitive id="{0}" type="booth-site">
-+            <instance_attributes>
-+                <nvpair name="config" value="{1}"/>
-+            </instance_attributes>
-+        </primitive>
-+    '''.format(id, booth_config_file_path))
-+
-+def fixture_ip_element(id, ip=""):
-+    return etree.fromstring('''
-+        <primitive id="{0}" type="IPaddr2">
-+            <instance_attributes id="{0}-ia">
-+            <nvpair
-+                id="booth-booth-{0}-ia-ip"
-+                name="ip"
-+                value="{1}"
-+            />
-+          </instance_attributes>
-+        </primitive>
-+    '''.format(id, ip))
-+
-+class CreateResourceIdTest(TestCase):
-+    @mock.patch("pcs.lib.booth.resource.find_unique_id")
-+    def test_return_new_uinq_id(self, mock_find_unique_id):
-+        resources_section = etree.fromstring('''<resources/>''')
-+        mock_find_unique_id.side_effect = (
-+            lambda resources_section, id: "{0}-n".format(id)
-+        )
-+        self.assertEqual(
-+            "booth-some-name-ip-n",
-+            booth_resource.create_resource_id(
-+                resources_section, "some-name", "ip"
-+            )
-+        )
-+
-+class FindBoothResourceElementsTest(TestCase):
-+    def test_returns_empty_list_when_no_matching_booth_element(self):
-+        self.assertEqual([], booth_resource.find_for_config(
-+            fixture_resources_with_booth("/ANOTHER/PATH/TO/CONF"),
-+            "/PATH/TO/CONF"
-+        ))
-+
-+
-+    def test_returns_all_found_resource_elements(self):
-+        resources = etree.fromstring('<resources/>')
-+        first = fixture_booth_element("first", "/PATH/TO/CONF")
-+        second = fixture_booth_element("second", "/ANOTHER/PATH/TO/CONF")
-+        third = fixture_booth_element("third", "/PATH/TO/CONF")
-+        for element in [first, second,third]:
-+            resources.append(element)
-+
-+        self.assertEqual(
-+            [first, third],
-+            booth_resource.find_for_config(
-+                resources,
-+                "/PATH/TO/CONF"
-+            )
-+        )
-+
-+class RemoveFromClusterTest(TestCase):
-+    def call(self, resources_section, remove_multiple=False):
-+        mock_resource_remove = mock.Mock()
-+        num_of_removed_booth_resources = booth_resource.get_remover(
-+            mock_resource_remove
-+        )(
-+            resources_section,
-+            "/PATH/TO/CONF",
-+            remove_multiple,
-+        )
-+        return (
-+            mock_resource_remove,
-+            num_of_removed_booth_resources
-+        )
-+
-+    def fixture_resources_including_two_booths(self):
-+        resources_section = etree.fromstring('<resources/>')
-+        first = fixture_booth_element("first", "/PATH/TO/CONF")
-+        second = fixture_booth_element("second", "/PATH/TO/CONF")
-+        resources_section.append(first)
-+        resources_section.append(second)
-+        return resources_section
-+
-+    def test_raises_when_booth_resource_not_found(self):
-+        self.assertRaises(
-+            booth_resource.BoothNotFoundInCib,
-+            lambda: self.call(etree.fromstring('<resources/>')),
-+        )
-+
-+    def test_raises_when_more_booth_resources_found(self):
-+        resources_section = self.fixture_resources_including_two_booths()
-+        self.assertRaises(
-+            booth_resource.BoothMultipleOccurenceFoundInCib,
-+            lambda: self.call(resources_section),
-+        )
-+
-+    def test_returns_number_of_removed_elements(self):
-+        resources_section = self.fixture_resources_including_two_booths()
-+        mock_resource_remove, num_of_removed_booth_resources = self.call(
-+            resources_section,
-+            remove_multiple=True
-+        )
-+        self.assertEqual(num_of_removed_booth_resources, 2)
-+        self.assertEqual(
-+            mock_resource_remove.mock_calls, [
-+                mock.call('first'),
-+                mock.call('second'),
-+            ]
-+        )
-+
-+    def test_remove_ip_when_is_only_booth_sibling_in_group(self):
-+        resources_section = etree.fromstring('''
-+            <resources>
-+                <group>
-+                    <primitive id="ip" type="IPaddr2"/>
-+                    <primitive id="booth" type="booth-site">
-+                        <instance_attributes>
-+                            <nvpair name="config" value="/PATH/TO/CONF"/>
-+                        </instance_attributes>
-+                    </primitive>
-+                </group>
-+            </resources>
-+        ''')
-+
-+        mock_resource_remove, _ = self.call(
-+            resources_section,
-+            remove_multiple=True
-+        )
-+        self.assertEqual(
-+            mock_resource_remove.mock_calls, [
-+                mock.call('ip'),
-+                mock.call('booth'),
-+            ]
-+        )
-+
-+
-+class FindBindedIpTest(TestCase):
-+    def fixture_resource_section(self, ip_element_list):
-+        resources_section = etree.fromstring('<resources/>')
-+        group = etree.SubElement(resources_section, "group")
-+        group.append(fixture_booth_element("booth1", "/PATH/TO/CONF"))
-+        for ip_element in ip_element_list:
-+            group.append(ip_element)
-+        return resources_section
-+
-+
-+    def test_returns_None_when_no_ip(self):
-+        self.assertEqual(
-+            [],
-+            booth_resource.find_bound_ip(
-+                self.fixture_resource_section([]),
-+                "/PATH/TO/CONF",
-+            )
-+        )
-+
-+    def test_returns_ip_when_correctly_found(self):
-+        self.assertEqual(
-+            ["192.168.122.31"],
-+            booth_resource.find_bound_ip(
-+                self.fixture_resource_section([
-+                    fixture_ip_element("ip1", "192.168.122.31"),
-+                ]),
-+                "/PATH/TO/CONF",
-+            )
-+        )
-+
-+    def test_returns_None_when_more_ip(self):
-+        self.assertEqual(
-+            ["192.168.122.31", "192.168.122.32"],
-+            booth_resource.find_bound_ip(
-+                self.fixture_resource_section([
-+                    fixture_ip_element("ip1", "192.168.122.31"),
-+                    fixture_ip_element("ip2", "192.168.122.32"),
-+                ]),
-+                "/PATH/TO/CONF",
-+            )
-+        )
-diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py
-new file mode 100644
-index 0000000..0ea837a
---- /dev/null
-+++ b/pcs/lib/booth/test/test_status.py
-@@ -0,0 +1,137 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+try:
-+    # python 2
-+    #pylint: disable=unused-import
-+    from urlparse import parse_qs as url_decode
-+except ImportError:
-+    # python 3
-+    from urllib.parse import parse_qs as url_decode
-+
-+from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.assertions import assert_raise_library_error
-+
-+from pcs import settings
-+from pcs.common import report_codes
-+from pcs.lib.errors import ReportItemSeverity as Severities
-+from pcs.lib.external import CommandRunner
-+import pcs.lib.booth.status as lib
-+
-+
-+class GetDaemonStatusTest(TestCase):
-+    def setUp(self):
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+
-+    def test_no_name(self):
-+        self.mock_run.run.return_value = ("output", 0)
-+        self.assertEqual("output", lib.get_daemon_status(self.mock_run))
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "status"]
-+        )
-+
-+    def test_with_name(self):
-+        self.mock_run.run.return_value = ("output", 0)
-+        self.assertEqual("output", lib.get_daemon_status(self.mock_run, "name"))
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "status", "-c", "name"]
-+        )
-+
-+    def test_daemon_not_running(self):
-+        self.mock_run.run.return_value = ("", 7)
-+        self.assertEqual("", lib.get_daemon_status(self.mock_run))
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "status"]
-+        )
-+
-+    def test_failure(self):
-+        self.mock_run.run.return_value = ("out", 1)
-+        assert_raise_library_error(
-+            lambda: lib.get_daemon_status(self.mock_run),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_DAEMON_STATUS_ERROR,
-+                {"reason": "out"}
-+            )
-+        )
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "status"]
-+        )
-+
-+
-+class GetTicketsStatusTest(TestCase):
-+    def setUp(self):
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+
-+    def test_no_name(self):
-+        self.mock_run.run.return_value = ("output", 0)
-+        self.assertEqual("output", lib.get_tickets_status(self.mock_run))
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "list"]
-+        )
-+
-+    def test_with_name(self):
-+        self.mock_run.run.return_value = ("output", 0)
-+        self.assertEqual(
-+            "output", lib.get_tickets_status(self.mock_run, "name")
-+        )
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "list", "-c", "name"]
-+        )
-+
-+    def test_failure(self):
-+        self.mock_run.run.return_value = ("out", 1)
-+        assert_raise_library_error(
-+            lambda: lib.get_tickets_status(self.mock_run),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_TICKET_STATUS_ERROR,
-+                {
-+                    "reason": "out"
-+                }
-+            )
-+        )
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "list"]
-+        )
-+
-+
-+class GetPeersStatusTest(TestCase):
-+    def setUp(self):
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+
-+    def test_no_name(self):
-+        self.mock_run.run.return_value = ("output", 0)
-+        self.assertEqual("output", lib.get_peers_status(self.mock_run))
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "peers"]
-+        )
-+
-+    def test_with_name(self):
-+        self.mock_run.run.return_value = ("output", 0)
-+        self.assertEqual("output", lib.get_peers_status(self.mock_run, "name"))
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "peers", "-c", "name"]
-+        )
-+
-+    def test_failure(self):
-+        self.mock_run.run.return_value = ("out", 1)
-+        assert_raise_library_error(
-+            lambda: lib.get_peers_status(self.mock_run),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_PEERS_STATUS_ERROR,
-+                {
-+                    "reason": "out"
-+                }
-+            )
-+        )
-+        self.mock_run.run.assert_called_once_with(
-+            [settings.booth_binary, "peers"]
-+        )
-diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py
-new file mode 100644
-index 0000000..58500cc
---- /dev/null
-+++ b/pcs/lib/booth/test/test_sync.py
-@@ -0,0 +1,1215 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+import json
-+import base64
-+try:
-+    # python 2
-+    from urlparse import parse_qs as url_decode
-+except ImportError:
-+    # python 3
-+    from urllib.parse import parse_qs as url_decode
-+
-+from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.assertions import (
-+    assert_report_item_list_equal,
-+    assert_raise_library_error,
-+)
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-+
-+from pcs.common import report_codes
-+from pcs.lib.node import NodeAddresses, NodeAddressesList
-+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
-+from pcs.lib.external import NodeCommunicator, NodeConnectionException
-+import pcs.lib.booth.sync as lib
-+
-+
-+def to_b64(string):
-+    return base64.b64encode(string.encode("utf-8")).decode("utf-8")
-+
-+
-+class SetConfigOnNodeTest(TestCase):
-+    def setUp(self):
-+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.node = NodeAddresses("node")
-+
-+    def test_with_authfile(self):
-+        lib._set_config_on_node(
-+            self.mock_com,
-+            self.mock_rep,
-+            self.node,
-+            "cfg_name",
-+            "cfg",
-+            authfile="/abs/path/my-key.key",
-+            authfile_data="test key".encode("utf-8")
-+        )
-+        self.assertEqual(1, self.mock_com.call_node.call_count)
-+        self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0])
-+        self.assertEqual(
-+            "remote/booth_set_config", self.mock_com.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_com.call_node.call_args[0][2])
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            {
-+                "config": {
-+                    "name": "cfg_name.conf",
-+                    "data": "cfg"
-+                },
-+                "authfile": {
-+                    "name": "my-key.key",
-+                    "data": to_b64("test key")
-+                }
-+            },
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                {
-+                    "node": self.node.label,
-+                    "name": "cfg_name",
-+                    "name_list": ["cfg_name"]
-+                }
-+            )]
-+        )
-+
-+    def _assert(self):
-+        self.assertEqual(1, self.mock_com.call_node.call_count)
-+        self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0])
-+        self.assertEqual(
-+            "remote/booth_set_config", self.mock_com.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_com.call_node.call_args[0][2])
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            {
-+                "config": {
-+                    "name": "cfg_name.conf",
-+                    "data": "cfg"
-+                }
-+            },
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                {
-+                    "node": self.node.label,
-+                    "name": "cfg_name",
-+                    "name_list": ["cfg_name"]
-+                }
-+            )]
-+        )
-+
-+    def test_authfile_data_None(self):
-+        lib._set_config_on_node(
-+            self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg",
-+            authfile="key.key"
-+        )
-+        self._assert()
-+
-+    def test_authfile_only_data(self):
-+        lib._set_config_on_node(
-+            self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg",
-+            authfile_data="key".encode("utf-8")
-+        )
-+        self._assert()
-+
-+    def test_without_authfile(self):
-+        lib._set_config_on_node(
-+            self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg"
-+        )
-+        self._assert()
-+
-+
-+@mock.patch("pcs.lib.booth.sync.parallel_nodes_communication_helper")
-+class SyncConfigInCluster(TestCase):
-+    def setUp(self):
-+        self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.node_list = NodeAddressesList(
-+            [NodeAddresses("node" + str(i) for i in range(5))]
-+        )
-+
-+    def test_without_authfile(self, mock_parallel):
-+        lib.send_config_to_all_nodes(
-+            self.mock_communicator,
-+            self.mock_reporter,
-+            self.node_list,
-+            "cfg_name",
-+            "config data"
-+        )
-+        mock_parallel.assert_called_once_with(
-+            lib._set_config_on_node,
-+            [
-+                (
-+                    [
-+                        self.mock_communicator,
-+                        self.mock_reporter,
-+                        node,
-+                        "cfg_name",
-+                        "config data",
-+                        None,
-+                        None
-+                    ],
-+                    {}
-+                )
-+                for node in self.node_list
-+            ],
-+            self.mock_reporter,
-+            False
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.BOOTH_DISTRIBUTING_CONFIG,
-+                {"name": "cfg_name"}
-+            )]
-+        )
-+
-+    def test_skip_offline(self, mock_parallel):
-+        lib.send_config_to_all_nodes(
-+            self.mock_communicator,
-+            self.mock_reporter,
-+            self.node_list,
-+            "cfg_name",
-+            "config data",
-+            skip_offline=True
-+        )
-+        mock_parallel.assert_called_once_with(
-+            lib._set_config_on_node,
-+            [
-+                (
-+                    [
-+                        self.mock_communicator,
-+                        self.mock_reporter,
-+                        node,
-+                        "cfg_name",
-+                        "config data",
-+                        None,
-+                        None
-+                    ],
-+                    {}
-+                )
-+                for node in self.node_list
-+                ],
-+            self.mock_reporter,
-+            True
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.BOOTH_DISTRIBUTING_CONFIG,
-+                {"name": "cfg_name"}
-+            )]
-+        )
-+
-+    def test_with_authfile(self, mock_parallel):
-+        lib.send_config_to_all_nodes(
-+            self.mock_communicator,
-+            self.mock_reporter,
-+            self.node_list,
-+            "cfg_name",
-+            "config data",
-+            authfile="/my/auth/file.key",
-+            authfile_data="authfile data".encode("utf-8")
-+        )
-+        mock_parallel.assert_called_once_with(
-+            lib._set_config_on_node,
-+            [
-+                (
-+                    [
-+                        self.mock_communicator,
-+                        self.mock_reporter,
-+                        node,
-+                        "cfg_name",
-+                        "config data",
-+                        "/my/auth/file.key",
-+                        "authfile data".encode("utf-8")
-+                    ],
-+                    {}
-+                )
-+                for node in self.node_list
-+                ],
-+            self.mock_reporter,
-+            False
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.BOOTH_DISTRIBUTING_CONFIG,
-+                {"name": "cfg_name"}
-+            )]
-+        )
-+
-+
-+@mock.patch("pcs.lib.booth.config_structure.get_authfile")
-+@mock.patch("pcs.lib.booth.config_parser.parse")
-+@mock.patch("pcs.lib.booth.config_files.read_configs")
-+@mock.patch("pcs.lib.booth.config_files.read_authfile")
-+class SendAllConfigToNodeTest(TestCase):
-+    def setUp(self):
-+        self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.node = NodeAddresses("node")
-+
-+    @staticmethod
-+    def mock_parse_fn(config_content):
-+        if config_content not in ["config1", "config2"]:
-+            raise AssertionError(
-+                "unexpected input {0}".format(config_content)
-+            )
-+        return config_content
-+
-+    @staticmethod
-+    def mock_authfile_fn(parsed_config):
-+        _data = {
-+            "config1": "/path/to/file1.key",
-+            "config2": "/path/to/file2.key"
-+        }
-+        if parsed_config not in _data:
-+            raise AssertionError(
-+                "unexpected input {0}".format(parsed_config)
-+            )
-+        return _data[parsed_config]
-+
-+    @staticmethod
-+    def mock_read_authfile_fn(_, authfile_path):
-+        _data = {
-+            "/path/to/file1.key": "some key".encode("utf-8"),
-+            "/path/to/file2.key": "another key".encode("utf-8"),
-+        }
-+        if authfile_path not in _data:
-+            raise AssertionError(
-+                "unexpected input {0}".format(authfile_path)
-+            )
-+        return _data[authfile_path]
-+
-+    def test_success(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = self.mock_authfile_fn
-+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = """
-+        {
-+            "existing": [],
-+            "failed": {},
-+            "saved": ["name1.conf", "file1.key", "name2.conf", "file2.key"]
-+        }
-+        """
-+        lib.send_all_config_to_node(
-+            self.mock_communicator, self.mock_reporter, self.node
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_read_authfile.call_count)
-+        mock_read_authfile.assert_has_calls([
-+            mock.call(self.mock_reporter, "/path/to/file1.key"),
-+            mock.call(self.mock_reporter, "/path/to/file2.key")
-+        ])
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file1.key",
-+                    "data": to_b64("some key"),
-+                    "is_authfile": True
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
-+                    {"node": self.node.label}
-+                ),
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    {
-+                        "node": self.node.label,
-+                        "name": "name1.conf, file1.key, name2.conf, file2.key",
-+                        "name_list": [
-+                            "name1.conf", "file1.key", "name2.conf", "file2.key"
-+                        ]
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_do_not_rewrite_existing(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = self.mock_authfile_fn
-+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = """
-+        {
-+            "existing": ["name1.conf", "file1.key"],
-+            "failed": {},
-+            "saved": ["name2.conf", "file2.key"]
-+        }
-+        """
-+        assert_raise_library_error(
-+            lambda: lib.send_all_config_to_node(
-+                self.mock_communicator, self.mock_reporter, self.node
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.FILE_ALREADY_EXISTS,
-+                {
-+                    "file_role": None,
-+                    "file_path": "name1.conf",
-+                    "node": self.node.label
-+                },
-+                report_codes.FORCE_FILE_OVERWRITE
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.FILE_ALREADY_EXISTS,
-+                {
-+                    "file_role": None,
-+                    "file_path": "file1.key",
-+                    "node": self.node.label
-+                },
-+                report_codes.FORCE_FILE_OVERWRITE
-+            )
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_read_authfile.call_count)
-+        mock_read_authfile.assert_has_calls([
-+            mock.call(self.mock_reporter, "/path/to/file1.key"),
-+            mock.call(self.mock_reporter, "/path/to/file2.key")
-+        ])
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file1.key",
-+                    "data": to_b64("some key"),
-+                    "is_authfile": True
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
-+                    {"node": self.node.label}
-+                ),
-+                (
-+                    Severities.ERROR,
-+                    report_codes.FILE_ALREADY_EXISTS,
-+                    {
-+                        "file_role": None,
-+                        "file_path": "name1.conf",
-+                        "node": self.node.label
-+                    },
-+                    report_codes.FORCE_FILE_OVERWRITE
-+                ),
-+                (
-+                    Severities.ERROR,
-+                    report_codes.FILE_ALREADY_EXISTS,
-+                    {
-+                        "file_role": None,
-+                        "file_path": "file1.key",
-+                        "node": self.node.label
-+                    },
-+                    report_codes.FORCE_FILE_OVERWRITE
-+                )
-+            ]
-+        )
-+
-+    def test_rewrite_existing(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = self.mock_authfile_fn
-+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = """
-+        {
-+            "existing": ["name1.conf", "file1.key"],
-+            "failed": {},
-+            "saved": ["name2.conf", "file2.key"]
-+        }
-+        """
-+        lib.send_all_config_to_node(
-+            self.mock_communicator,
-+            self.mock_reporter,
-+            self.node,
-+            rewrite_existing=True
-+        )
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_read_authfile.call_count)
-+        mock_read_authfile.assert_has_calls([
-+            mock.call(self.mock_reporter, "/path/to/file1.key"),
-+            mock.call(self.mock_reporter, "/path/to/file2.key")
-+        ])
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertTrue("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file1.key",
-+                    "data": to_b64("some key"),
-+                    "is_authfile": True
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
-+                    {"node": self.node.label}
-+                ),
-+                (
-+                    Severities.WARNING,
-+                    report_codes.FILE_ALREADY_EXISTS,
-+                    {
-+                        "file_role": None,
-+                        "file_path": "name1.conf",
-+                        "node": self.node.label
-+                    }
-+                ),
-+                (
-+                    Severities.WARNING,
-+                    report_codes.FILE_ALREADY_EXISTS,
-+                    {
-+                        "file_role": None,
-+                        "file_path": "file1.key",
-+                        "node": self.node.label
-+                    }
-+                ),
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    {
-+                        "node": self.node.label,
-+                        "name": "name2.conf, file2.key",
-+                        "name_list": ["name2.conf", "file2.key"]
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_write_failure(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = self.mock_authfile_fn
-+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = """
-+        {
-+            "existing": [],
-+            "failed": {
-+                "name1.conf": "Error message",
-+                "file1.key": "Another error message"
-+            },
-+            "saved": ["name2.conf", "file2.key"]
-+        }
-+        """
-+        assert_raise_library_error(
-+            lambda: lib.send_all_config_to_node(
-+                self.mock_communicator, self.mock_reporter, self.node
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                {
-+                    "node": self.node.label,
-+                    "name": "name1.conf",
-+                    "reason": "Error message"
-+                }
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                {
-+                    "node": self.node.label,
-+                    "name": "file1.key",
-+                    "reason": "Another error message"
-+                }
-+            )
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_read_authfile.call_count)
-+        mock_read_authfile.assert_has_calls([
-+            mock.call(self.mock_reporter, "/path/to/file1.key"),
-+            mock.call(self.mock_reporter, "/path/to/file2.key")
-+        ])
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file1.key",
-+                    "data": to_b64("some key"),
-+                    "is_authfile": True
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
-+                    {"node": self.node.label}
-+                ),
-+                (
-+                    Severities.ERROR,
-+                    report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                    {
-+                        "node": self.node.label,
-+                        "name": "name1.conf",
-+                        "reason": "Error message"
-+                    }
-+                ),
-+                (
-+                    Severities.ERROR,
-+                    report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                    {
-+                        "node": self.node.label,
-+                        "name": "file1.key",
-+                        "reason": "Another error message"
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_communication_failure(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = self.mock_authfile_fn
-+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.side_effect = NodeConnectionException(
-+            self.node.label, "command", "reason"
-+        )
-+        assert_raise_library_error(
-+            lambda: lib.send_all_config_to_node(
-+                self.mock_communicator, self.mock_reporter, self.node
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                {
-+                    "node": self.node.label,
-+                    "command": "command",
-+                    "reason": "reason"
-+                }
-+            )
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_read_authfile.call_count)
-+        mock_read_authfile.assert_has_calls([
-+            mock.call(self.mock_reporter, "/path/to/file1.key"),
-+            mock.call(self.mock_reporter, "/path/to/file2.key")
-+        ])
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file1.key",
-+                    "data": to_b64("some key"),
-+                    "is_authfile": True
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+
-+    def test_wrong_response_format(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = self.mock_authfile_fn
-+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = """
-+            {
-+                "existing_files": [],
-+                "failed": {
-+                    "name1.conf": "Error message",
-+                    "file1.key": "Another error message"
-+                },
-+                "saved": ["name2.conf", "file2.key"]
-+            }
-+        """
-+        assert_raise_library_error(
-+            lambda: lib.send_all_config_to_node(
-+                self.mock_communicator, self.mock_reporter, self.node
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                {"node": self.node.label}
-+            )
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_read_authfile.call_count)
-+        mock_read_authfile.assert_has_calls([
-+            mock.call(self.mock_reporter, "/path/to/file1.key"),
-+            mock.call(self.mock_reporter, "/path/to/file2.key")
-+        ])
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file1.key",
-+                    "data": to_b64("some key"),
-+                    "is_authfile": True
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+
-+    def test_response_not_json(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = self.mock_authfile_fn
-+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = "not json"
-+        assert_raise_library_error(
-+            lambda: lib.send_all_config_to_node(
-+                self.mock_communicator, self.mock_reporter, self.node
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                {"node": self.node.label}
-+            )
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_read_authfile.call_count)
-+        mock_read_authfile.assert_has_calls([
-+            mock.call(self.mock_reporter, "/path/to/file1.key"),
-+            mock.call(self.mock_reporter, "/path/to/file2.key")
-+        ])
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file1.key",
-+                    "data": to_b64("some key"),
-+                    "is_authfile": True
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+
-+
-+    def test_configs_without_authfiles(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        def mock_authfile_fn(parsed_config):
-+            if parsed_config == "config1":
-+                return None
-+            elif parsed_config == "config2":
-+                return "/path/to/file2.key"
-+            else:
-+                raise AssertionError(
-+                    "unexpected input: {0}".format(parsed_config)
-+                )
-+
-+        mock_parse.side_effect = self.mock_parse_fn
-+        mock_authfile.side_effect = mock_authfile_fn
-+        mock_read_authfile.return_value = "another key".encode("utf-8")
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = """
-+        {
-+            "existing": [],
-+            "failed": {},
-+            "saved": ["name1.conf", "name2.conf", "file2.key"]
-+        }
-+        """
-+        lib.send_all_config_to_node(
-+            self.mock_communicator, self.mock_reporter, self.node
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        self.assertEqual(2, mock_authfile.call_count)
-+        mock_authfile.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        mock_read_authfile.assert_called_once_with(
-+            self.mock_reporter, "/path/to/file2.key"
-+        )
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name1.conf",
-+                    "data": "config1",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
-+                    {"node": self.node.label}
-+                ),
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    {
-+                        "node": self.node.label,
-+                        "name": "name1.conf, name2.conf, file2.key",
-+                        "name_list": ["name1.conf", "name2.conf", "file2.key"]
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_unable_to_parse_config(
-+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
-+    ):
-+        def mock_parse_fn(config_data):
-+            if config_data == "config1":
-+                raise LibraryError()
-+            elif config_data == "config2":
-+                return "config2"
-+            else:
-+                raise AssertionError(
-+                    "unexpected input: {0}".format(config_data)
-+                )
-+
-+        mock_parse.side_effect = mock_parse_fn
-+        mock_authfile.return_value = "/path/to/file2.key"
-+        mock_read_authfile.return_value = "another key".encode("utf-8")
-+        mock_read_configs.return_value = {
-+            "name1.conf": "config1",
-+            "name2.conf": "config2"
-+        }
-+        self.mock_communicator.call_node.return_value = """
-+         {
-+             "existing": [],
-+             "failed": {},
-+             "saved": ["name2.conf", "file2.key"]
-+         }
-+         """
-+        lib.send_all_config_to_node(
-+            self.mock_communicator, self.mock_reporter, self.node
-+        )
-+        self.assertEqual(2, mock_parse.call_count)
-+        mock_parse.assert_has_calls([
-+            mock.call("config1"), mock.call("config2")
-+        ])
-+        mock_authfile.assert_called_once_with("config2")
-+        mock_read_authfile.assert_called_once_with(
-+            self.mock_reporter, "/path/to/file2.key"
-+        )
-+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
-+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
-+        self.assertEqual(
-+            self.node, self.mock_communicator.call_node.call_args[0][0]
-+        )
-+        self.assertEqual(
-+            "remote/booth_save_files",
-+            self.mock_communicator.call_node.call_args[0][1]
-+        )
-+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
-+        self.assertFalse("rewrite_existing" in data)
-+        self.assertTrue("data_json" in data)
-+        self.assertEqual(
-+            [
-+                {
-+                    "name": "name2.conf",
-+                    "data": "config2",
-+                    "is_authfile": False
-+                },
-+                {
-+                    "name": "file2.key",
-+                    "data": to_b64("another key"),
-+                    "is_authfile": True
-+                }
-+            ],
-+            json.loads(data["data_json"][0])
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
-+                    {"node": self.node.label}
-+                ),
-+                (
-+                    Severities.WARNING,
-+                    report_codes.BOOTH_SKIPPING_CONFIG,
-+                    {
-+                        "config_file": "name1.conf"
-+                    }
-+                ),
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    {
-+                        "node": self.node.label,
-+                        "name": "name2.conf, file2.key",
-+                        "name_list": ["name2.conf", "file2.key"]
-+                    }
-+                )
-+            ]
-+        )
-+
-+
-+class PullConfigFromNodeTest(TestCase):
-+    def setUp(self):
-+        self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
-+        self.node = NodeAddresses("node")
-+
-+    def test_success(self):
-+        self.mock_communicator.call_node.return_value = "{}"
-+        self.assertEqual(
-+            {}, lib.pull_config_from_node(
-+                self.mock_communicator, self.node, "booth"
-+            )
-+        )
-+        self.mock_communicator.call_node.assert_called_once_with(
-+            self.node, "remote/booth_get_config", "name=booth"
-+        )
-+
-+    def test_not_json(self):
-+        self.mock_communicator.call_node.return_value = "not json"
-+        assert_raise_library_error(
-+            lambda: lib.pull_config_from_node(
-+                self.mock_communicator, self.node, "booth"
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                {"node": self.node.label}
-+            )
-+        )
-+
-+    def test_communication_failure(self):
-+        self.mock_communicator.call_node.side_effect = NodeConnectionException(
-+            self.node.label, "command", "reason"
-+        )
-+        assert_raise_library_error(
-+            lambda: lib.pull_config_from_node(
-+                self.mock_communicator, self.node, "booth"
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
-+                {
-+                    "node": self.node.label,
-+                    "command": "command",
-+                    "reason": "reason"
-+                }
-+            )
-+        )
-diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
-index f86b63b..d8ce57a 100644
---- a/pcs/lib/cib/tools.py
-+++ b/pcs/lib/cib/tools.py
-@@ -100,6 +100,13 @@ def get_constraints(tree):
-     """
-     return _get_mandatory_section(tree, "configuration/constraints")
- 
-+def get_resources(tree):
-+    """
-+    Return 'resources' element from tree
-+    tree cib etree node
-+    """
-+    return _get_mandatory_section(tree, "configuration/resources")
-+
- def find_parent(element, tag_names):
-     candidate = element
-     while True:
-diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py
-new file mode 100644
-index 0000000..43ea9dd
---- /dev/null
-+++ b/pcs/lib/commands/booth.py
-@@ -0,0 +1,349 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import base64
-+import os.path
-+from functools import partial
-+
-+from pcs import settings
-+from pcs.lib import external, reports
-+from pcs.lib.booth import (
-+    config_exchange,
-+    config_files,
-+    config_structure,
-+    reports as booth_reports,
-+    resource,
-+    status,
-+    sync,
-+)
-+from pcs.lib.booth.config_parser import parse, build
-+from pcs.lib.booth.env import get_config_file_name
-+from pcs.lib.cib.tools import get_resources
-+from pcs.lib.errors import LibraryError, ReportItemSeverity
-+from pcs.lib.node import NodeAddresses
-+
-+
-+def config_setup(env, booth_configuration, overwrite_existing=False):
-+    """
-+    create boot configuration
-+    list site_list contains site adresses of multisite
-+    list arbitrator_list contains arbitrator adresses of multisite
-+    """
-+
-+    config_structure.validate_peers(
-+        booth_configuration.get("sites", []),
-+        booth_configuration.get("arbitrators", [])
-+    )
-+    config_content = config_exchange.from_exchange_format(booth_configuration)
-+
-+    env.booth.create_key(config_files.generate_key(), overwrite_existing)
-+    config_content = config_structure.set_authfile(
-+        config_content,
-+        env.booth.key_path
-+    )
-+    env.booth.create_config(build(config_content), overwrite_existing)
-+
-+def config_destroy(env, ignore_config_load_problems=False):
-+    env.booth.command_expect_live_env()
-+    env.command_expect_live_corosync_env()
-+
-+    name = env.booth.name
-+    config_is_used = partial(booth_reports.booth_config_is_used, name)
-+
-+    report_list = []
-+
-+    if(env.is_node_in_cluster() and resource.find_for_config(
-+        get_resources(env.get_cib()),
-+        get_config_file_name(name),
-+    )):
-+        report_list.append(config_is_used("in cluster resource"))
-+
-+    #Only systemd is currently supported. Initd does not supports multiple
-+    #instances (here specified by name)
-+    if external.is_systemctl():
-+        if external.is_service_running(env.cmd_runner(), "booth", name):
-+            report_list.append(config_is_used("(running in systemd)"))
-+
-+        if external.is_service_enabled(env.cmd_runner(), "booth", name):
-+            report_list.append(config_is_used("(enabled in systemd)"))
-+
-+    if report_list:
-+        raise LibraryError(*report_list)
-+
-+    authfile_path = None
-+    try:
-+        authfile_path = config_structure.get_authfile(
-+            parse(env.booth.get_config_content())
-+        )
-+    except LibraryError:
-+        if not ignore_config_load_problems:
-+            raise LibraryError(booth_reports.booth_cannot_identify_keyfile())
-+
-+        #if content not received, not valid,... still remove config needed
-+        env.report_processor.process(
-+            booth_reports.booth_cannot_identify_keyfile(
-+                severity=ReportItemSeverity.WARNING
-+            )
-+        )
-+
-+    if(
-+        authfile_path
-+        and
-+        os.path.dirname(authfile_path) == settings.booth_config_dir
-+    ):
-+        env.booth.set_key_path(authfile_path)
-+        env.booth.remove_key()
-+    env.booth.remove_config()
-+
-+def config_show(env):
-+    """
-+    return configuration as tuple of sites list and arbitrators list
-+    """
-+    return config_exchange.to_exchange_format(
-+        parse(env.booth.get_config_content())
-+    )
-+
-+def config_ticket_add(env, ticket_name):
-+    """
-+    add ticket to booth configuration
-+    """
-+    booth_configuration = config_structure.add_ticket(
-+        parse(env.booth.get_config_content()),
-+        ticket_name
-+    )
-+    env.booth.push_config(build(booth_configuration))
-+
-+def config_ticket_remove(env, ticket_name):
-+    """
-+    remove ticket from booth configuration
-+    """
-+    booth_configuration = config_structure.remove_ticket(
-+        parse(env.booth.get_config_content()),
-+        ticket_name
-+    )
-+    env.booth.push_config(build(booth_configuration))
-+
-+def create_in_cluster(env, name, ip, resource_create):
-+    #TODO resource_create is provisional hack until resources are not moved to
-+    #lib
-+    resources_section = get_resources(env.get_cib())
-+
-+    booth_config_file_path = get_config_file_name(name)
-+    if resource.find_for_config(resources_section, booth_config_file_path):
-+        raise LibraryError(booth_reports.booth_already_in_cib(name))
-+
-+    resource.get_creator(resource_create)(
-+        ip,
-+        booth_config_file_path,
-+        create_id = partial(
-+            resource.create_resource_id,
-+            resources_section,
-+            name
-+        )
-+    )
-+
-+def remove_from_cluster(env, name, resource_remove):
-+    #TODO resource_remove is provisional hack until resources are not moved to
-+    #lib
-+    try:
-+        num_of_removed_booth_resources = resource.get_remover(resource_remove)(
-+            get_resources(env.get_cib()),
-+            get_config_file_name(name),
-+        )
-+        if num_of_removed_booth_resources > 1:
-+            env.report_processor.process(
-+                booth_reports.booth_multiple_times_in_cib(
-+                    name,
-+                    severity=ReportItemSeverity.WARNING,
-+                )
-+            )
-+    except resource.BoothNotFoundInCib:
-+        raise LibraryError(booth_reports.booth_not_exists_in_cib(name))
-+    except resource.BoothMultipleOccurenceFoundInCib:
-+        raise LibraryError(booth_reports.booth_multiple_times_in_cib(name))
-+
-+def ticket_operation(operation, env, name, ticket, site_ip):
-+    if not site_ip:
-+        site_ip_list = resource.find_bound_ip(
-+            get_resources(env.get_cib()),
-+            get_config_file_name(name)
-+        )
-+        if len(site_ip_list) != 1:
-+            raise LibraryError(
-+                booth_reports.booth_cannot_determine_local_site_ip()
-+            )
-+        site_ip = site_ip_list[0]
-+
-+    command_output, return_code = env.cmd_runner().run([
-+        settings.booth_binary, operation,
-+        "-s", site_ip,
-+        ticket
-+    ])
-+
-+    if return_code != 0:
-+        raise LibraryError(
-+            booth_reports.booth_ticket_operation_failed(
-+                operation,
-+                command_output,
-+                site_ip,
-+                ticket
-+            )
-+        )
-+
-+ticket_grant = partial(ticket_operation, "grant")
-+ticket_revoke = partial(ticket_operation, "revoke")
-+
-+def config_sync(env, name, skip_offline_nodes=False):
-+    """
-+    Send specified local booth configuration to all nodes in cluster.
-+
-+    env -- LibraryEnvironment
-+    name -- booth instance name
-+    skip_offline_nodes -- if True offline nodes will be skipped
-+    """
-+    config = env.booth.get_config_content()
-+    authfile_path = config_structure.get_authfile(parse(config))
-+    authfile_content = config_files.read_authfile(
-+        env.report_processor, authfile_path
-+    )
-+
-+    sync.send_config_to_all_nodes(
-+        env.node_communicator(),
-+        env.report_processor,
-+        env.get_corosync_conf().get_nodes(),
-+        name,
-+        config,
-+        authfile=authfile_path,
-+        authfile_data=authfile_content,
-+        skip_offline=skip_offline_nodes
-+    )
-+
-+
-+def enable_booth(env, name=None):
-+    """
-+    Enable specified instance of booth service. Currently it is supported only
-+    systemd systems.
-+
-+    env -- LibraryEnvironment
-+    name -- string, name of booth instance
-+    """
-+    external.ensure_is_systemd()
-+    try:
-+        external.enable_service(env.cmd_runner(), "booth", name)
-+    except external.EnableServiceError as e:
-+        raise LibraryError(reports.service_enable_error(
-+            "booth", e.message, instance=name
-+        ))
-+    env.report_processor.process(reports.service_enable_success(
-+        "booth", instance=name
-+    ))
-+
-+
-+def disable_booth(env, name=None):
-+    """
-+    Disable specified instance of booth service. Currently it is supported only
-+    systemd systems.
-+
-+    env -- LibraryEnvironment
-+    name -- string, name of booth instance
-+    """
-+    external.ensure_is_systemd()
-+    try:
-+        external.disable_service(env.cmd_runner(), "booth", name)
-+    except external.DisableServiceError as e:
-+        raise LibraryError(reports.service_disable_error(
-+            "booth", e.message, instance=name
-+        ))
-+    env.report_processor.process(reports.service_disable_success(
-+        "booth", instance=name
-+    ))
-+
-+
-+def start_booth(env, name=None):
-+    """
-+    Start specified instance of booth service. Currently it is supported only
-+    systemd systems. On non systems it can be run like this:
-+        BOOTH_CONF_FILE=<booth-file-path> /etc/initd/booth-arbitrator
-+
-+    env -- LibraryEnvironment
-+    name -- string, name of booth instance
-+    """
-+    external.ensure_is_systemd()
-+    try:
-+        external.start_service(env.cmd_runner(), "booth", name)
-+    except external.StartServiceError as e:
-+        raise LibraryError(reports.service_start_error(
-+            "booth", e.message, instance=name
-+        ))
-+    env.report_processor.process(reports.service_start_success(
-+        "booth", instance=name
-+    ))
-+
-+
-+def stop_booth(env, name=None):
-+    """
-+    Stop specified instance of booth service. Currently it is supported only
-+    systemd systems.
-+
-+    env -- LibraryEnvironment
-+    name -- string, name of booth instance
-+    """
-+    external.ensure_is_systemd()
-+    try:
-+        external.stop_service(env.cmd_runner(), "booth", name)
-+    except external.StopServiceError as e:
-+        raise LibraryError(reports.service_stop_error(
-+            "booth", e.message, instance=name
-+        ))
-+    env.report_processor.process(reports.service_stop_success(
-+        "booth", instance=name
-+    ))
-+
-+
-+def pull_config(env, node_name, name):
-+    """
-+    Get config from specified node and save it on local system. It will
-+    rewrite existing files.
-+
-+    env -- LibraryEnvironment
-+    node_name -- string, name of node from which config should be fetched
-+    name -- string, name of booth instance of which config should be fetched
-+    """
-+    env.report_processor.process(
-+        booth_reports.booth_fetching_config_from_node(node_name, name)
-+    )
-+    output = sync.pull_config_from_node(
-+        env.node_communicator(), NodeAddresses(node_name), name
-+    )
-+    try:
-+        env.booth.create_config(output["config"]["data"], True)
-+        if (
-+            output["authfile"]["name"] is not None and
-+            output["authfile"]["data"]
-+        ):
-+            env.booth.set_key_path(os.path.join(
-+                settings.booth_config_dir, output["authfile"]["name"]
-+            ))
-+            env.booth.create_key(
-+                base64.b64decode(
-+                    output["authfile"]["data"].encode("utf-8")
-+                ),
-+                True
-+            )
-+        env.report_processor.process(
-+            booth_reports.booth_config_saved(name_list=[name])
-+        )
-+    except KeyError:
-+        raise LibraryError(reports.invalid_response_format(node_name))
-+
-+
-+def get_status(env, name=None):
-+    return {
-+        "status": status.get_daemon_status(env.cmd_runner(), name),
-+        "ticket": status.get_tickets_status(env.cmd_runner(), name),
-+        "peers": status.get_peers_status(env.cmd_runner(), name),
-+    }
-diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
-new file mode 100644
-index 0000000..20bf06a
---- /dev/null
-+++ b/pcs/lib/commands/test/test_booth.py
-@@ -0,0 +1,614 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os
-+import base64
-+
-+from unittest import TestCase
-+
-+from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-+from pcs.test.tools.assertions import (
-+    assert_raise_library_error,
-+    assert_report_item_list_equal,
-+)
-+
-+from pcs import settings
-+from pcs.common import report_codes
-+from pcs.lib.booth import resource as booth_resource
-+from pcs.lib.env import LibraryEnvironment
-+from pcs.lib.node import NodeAddresses
-+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
-+from pcs.lib.commands import booth as commands
-+from pcs.lib.external import (
-+    NodeCommunicator,
-+    CommandRunner,
-+    EnableServiceError,
-+    DisableServiceError,
-+    StartServiceError,
-+    StopServiceError
-+)
-+
-+def patch_commands(target, *args, **kwargs):
-+    return mock.patch(
-+        "pcs.lib.commands.booth.{0}".format(target), *args, **kwargs
-+    )
-+
-+@mock.patch("pcs.lib.booth.config_files.generate_key", return_value="key value")
-+@mock.patch("pcs.lib.commands.booth.build", return_value="config content")
-+@mock.patch("pcs.lib.booth.config_structure.validate_peers")
-+class ConfigSetupTest(TestCase):
-+    def test_successfuly_build_and_write_to_std_path(
-+        self, mock_validate_peers, mock_build, mock_generate_key
-+    ):
-+        env = mock.MagicMock()
-+        commands.config_setup(
-+            env,
-+            booth_configuration={
-+                "sites": ["1.1.1.1"],
-+                "arbitrators": ["2.2.2.2"],
-+            },
-+        )
-+        env.booth.create_config.assert_called_once_with(
-+            "config content",
-+            False
-+        )
-+        env.booth.create_key.assert_called_once_with(
-+            "key value",
-+            False
-+        )
-+        mock_validate_peers.assert_called_once_with(
-+            ["1.1.1.1"], ["2.2.2.2"]
-+        )
-+
-+    def test_sanitize_peers_before_validation(
-+        self, mock_validate_peers, mock_build, mock_generate_key
-+    ):
-+        commands.config_setup(env=mock.MagicMock(), booth_configuration={})
-+        mock_validate_peers.assert_called_once_with([], [])
-+
-+
-+class ConfigDestroyTest(TestCase):
-+    @patch_commands("external.is_systemctl", mock.Mock(return_value=True))
-+    @patch_commands("external.is_service_enabled", mock.Mock(return_value=True))
-+    @patch_commands("external.is_service_running", mock.Mock(return_value=True))
-+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[True]))
-+    def test_raises_when_booth_config_in_use(self):
-+        env = mock.MagicMock()
-+        env.booth.name = "somename"
-+
-+        assert_raise_library_error(
-+            lambda: commands.config_destroy(env),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_CONFIG_IS_USED,
-+                {
-+                    "name": "somename",
-+                    "detail": "in cluster resource",
-+                }
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_CONFIG_IS_USED,
-+                {
-+                    "name": "somename",
-+                    "detail": "(enabled in systemd)",
-+                }
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_CONFIG_IS_USED,
-+                {
-+                    "name": "somename",
-+                    "detail": "(running in systemd)",
-+                }
-+            )
-+        )
-+
-+    @patch_commands("external.is_systemctl", mock.Mock(return_value=False))
-+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[]))
-+    @patch_commands("parse", mock.Mock(side_effect=LibraryError()))
-+    def test_raises_when_cannot_get_content_of_config(self):
-+        env = mock.MagicMock()
-+        assert_raise_library_error(
-+            lambda: commands.config_destroy(env),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE,
-+                {},
-+                report_codes.FORCE_BOOTH_DESTROY
-+            )
-+        )
-+
-+    @patch_commands("external.is_systemctl", mock.Mock(return_value=False))
-+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[]))
-+    @patch_commands("parse", mock.Mock(side_effect=LibraryError()))
-+    def test_remove_config_even_if_cannot_get_its_content_when_forced(self):
-+        env = mock.MagicMock()
-+        env.report_processor = MockLibraryReportProcessor()
-+        commands.config_destroy(env, ignore_config_load_problems=True)
-+        env.booth.remove_config.assert_called_once_with()
-+        assert_report_item_list_equal(env.report_processor.report_item_list, [
-+            (
-+                Severities.WARNING,
-+                report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE,
-+                {}
-+            )
-+        ])
-+
-+@mock.patch("pcs.lib.commands.booth.config_structure.get_authfile")
-+@mock.patch("pcs.lib.commands.booth.parse")
-+@mock.patch("pcs.lib.booth.config_files.read_authfile")
-+@mock.patch("pcs.lib.booth.sync.send_config_to_all_nodes")
-+class ConfigSyncTest(TestCase):
-+    def setUp(self):
-+        self.mock_env = mock.MagicMock()
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_env.report_processor = self.mock_rep
-+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
-+        self.mock_env.node_communicator.return_value = self.mock_com
-+        self.node_list = ["node1", "node2", "node3"]
-+        corosync_conf = mock.MagicMock()
-+        corosync_conf.get_nodes.return_value = self.node_list
-+        self.mock_env.get_corosync_conf.return_value = corosync_conf
-+        self.mock_env.booth.get_config_content.return_value = "config"
-+
-+    def test_skip_offline(
-+        self, mock_sync, mock_read_key, mock_parse, mock_get_authfile
-+    ):
-+        mock_get_authfile.return_value = "/key/path.key"
-+        mock_read_key.return_value = "key"
-+        commands.config_sync(self.mock_env, "name", True)
-+        self.mock_env.booth.get_config_content.assert_called_once_with()
-+        mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key")
-+        mock_parse.assert_called_once_with("config")
-+        mock_sync.assert_called_once_with(
-+            self.mock_com,
-+            self.mock_rep,
-+            self.node_list,
-+            "name",
-+            "config",
-+            authfile="/key/path.key",
-+            authfile_data="key",
-+            skip_offline=True
-+        )
-+
-+    def test_do_not_skip_offline(
-+        self, mock_sync, mock_read_key, mock_parse, mock_get_authfile
-+    ):
-+        mock_get_authfile.return_value = "/key/path.key"
-+        mock_read_key.return_value = "key"
-+        commands.config_sync(self.mock_env, "name")
-+        self.mock_env.booth.get_config_content.assert_called_once_with()
-+        mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key")
-+        mock_parse.assert_called_once_with("config")
-+        mock_sync.assert_called_once_with(
-+            self.mock_com,
-+            self.mock_rep,
-+            self.node_list,
-+            "name",
-+            "config",
-+            authfile="/key/path.key",
-+            authfile_data="key",
-+            skip_offline=False
-+        )
-+
-+
-+@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
-+@mock.patch("pcs.lib.external.enable_service")
-+class EnableBoothTest(TestCase):
-+    def setUp(self):
-+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_env.cmd_runner.return_value = self.mock_run
-+        self.mock_env.report_processor = self.mock_rep
-+
-+    def test_success(self, mock_enable, mock_is_systemctl):
-+        commands.enable_booth(self.mock_env, "name")
-+        mock_enable.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.SERVICE_ENABLE_SUCCESS,
-+                {
-+                    "service": "booth",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )]
-+        )
-+
-+    def test_failed(self, mock_enable, mock_is_systemctl):
-+        mock_enable.side_effect = EnableServiceError("booth", "msg", "name")
-+        assert_raise_library_error(
-+            lambda: commands.enable_booth(self.mock_env, "name"),
-+            (
-+                Severities.ERROR,
-+                report_codes.SERVICE_ENABLE_ERROR,
-+                {
-+                    "service": "booth",
-+                    "reason": "msg",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )
-+        )
-+        mock_enable.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+
-+
-+@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
-+@mock.patch("pcs.lib.external.disable_service")
-+class DisableBoothTest(TestCase):
-+    def setUp(self):
-+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_env.cmd_runner.return_value = self.mock_run
-+        self.mock_env.report_processor = self.mock_rep
-+
-+    def test_success(self, mock_disable, mock_is_systemctl):
-+        commands.disable_booth(self.mock_env, "name")
-+        mock_disable.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.SERVICE_DISABLE_SUCCESS,
-+                {
-+                    "service": "booth",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )]
-+        )
-+
-+    def test_failed(self, mock_disable, mock_is_systemctl):
-+        mock_disable.side_effect = DisableServiceError("booth", "msg", "name")
-+        assert_raise_library_error(
-+            lambda: commands.disable_booth(self.mock_env, "name"),
-+            (
-+                Severities.ERROR,
-+                report_codes.SERVICE_DISABLE_ERROR,
-+                {
-+                    "service": "booth",
-+                    "reason": "msg",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )
-+        )
-+        mock_disable.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+
-+
-+@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
-+@mock.patch("pcs.lib.external.start_service")
-+class StartBoothTest(TestCase):
-+    def setUp(self):
-+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_env.cmd_runner.return_value = self.mock_run
-+        self.mock_env.report_processor = self.mock_rep
-+
-+    def test_success(self, mock_start, mock_is_systemctl):
-+        commands.start_booth(self.mock_env, "name")
-+        mock_start.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.SERVICE_START_SUCCESS,
-+                {
-+                    "service": "booth",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )]
-+        )
-+
-+    def test_failed(self, mock_start, mock_is_systemctl):
-+        mock_start.side_effect = StartServiceError("booth", "msg", "name")
-+        assert_raise_library_error(
-+            lambda: commands.start_booth(self.mock_env, "name"),
-+            (
-+                Severities.ERROR,
-+                report_codes.SERVICE_START_ERROR,
-+                {
-+                    "service": "booth",
-+                    "reason": "msg",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )
-+        )
-+        mock_start.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+
-+
-+@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
-+@mock.patch("pcs.lib.external.stop_service")
-+class StopBoothTest(TestCase):
-+    def setUp(self):
-+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_env.cmd_runner.return_value = self.mock_run
-+        self.mock_env.report_processor = self.mock_rep
-+
-+    def test_success(self, mock_stop, mock_is_systemctl):
-+        commands.stop_booth(self.mock_env, "name")
-+        mock_stop.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.SERVICE_STOP_SUCCESS,
-+                {
-+                    "service": "booth",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )]
-+        )
-+
-+    def test_failed(self, mock_stop, mock_is_systemctl):
-+        mock_stop.side_effect = StopServiceError("booth", "msg", "name")
-+        assert_raise_library_error(
-+            lambda: commands.stop_booth(self.mock_env, "name"),
-+            (
-+                Severities.ERROR,
-+                report_codes.SERVICE_STOP_ERROR,
-+                {
-+                    "service": "booth",
-+                    "reason": "msg",
-+                    "node": None,
-+                    "instance": "name",
-+                }
-+            )
-+        )
-+        mock_stop.assert_called_once_with(self.mock_run, "booth", "name")
-+        mock_is_systemctl.assert_called_once_with()
-+
-+
-+@mock.patch("pcs.lib.booth.sync.pull_config_from_node")
-+class PullConfigTest(TestCase):
-+    def setUp(self):
-+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
-+        self.mock_env.node_communicator.return_value = self.mock_com
-+        self.mock_env.report_processor = self.mock_rep
-+
-+    def test_with_authfile(self, mock_pull):
-+        mock_pull.return_value = {
-+            "config": {
-+                "name": "name.conf",
-+                "data": "config"
-+            },
-+            "authfile": {
-+                "name": "name.key",
-+                "data": base64.b64encode("key".encode("utf-8")).decode("utf-8")
-+            }
-+        }
-+        commands.pull_config(self.mock_env, "node", "name")
-+        mock_pull.assert_called_once_with(
-+            self.mock_com, NodeAddresses("node"), "name"
-+        )
-+        self.mock_env.booth.create_config.called_once_with("config", True)
-+        self.mock_env.booth.set_key_path.called_once_with(os.path.join(
-+            settings.booth_config_dir, "name.key"
-+        ))
-+        self.mock_env.booth.create_key.called_once_with(
-+            "key".encode("utf-8"), True
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
-+                    {
-+                        "node": "node",
-+                        "config": "name"
-+                    }
-+                ),
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    {
-+                        "node": None,
-+                        "name": "name",
-+                        "name_list": ["name"]
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_without_authfile(self, mock_pull):
-+        mock_pull.return_value = {
-+            "config": {
-+                "name": "name.conf",
-+                "data": "config"
-+            },
-+            "authfile": {
-+                "name": None,
-+                "data": None
-+            }
-+        }
-+        commands.pull_config(self.mock_env, "node", "name")
-+        mock_pull.assert_called_once_with(
-+            self.mock_com, NodeAddresses("node"), "name"
-+        )
-+        self.mock_env.booth.create_config.called_once_with("config", True)
-+        self.assertEqual(0, self.mock_env.booth.set_key_path.call_count)
-+        self.assertEqual(0, self.mock_env.booth.create_key.call_count)
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
-+                    {
-+                        "node": "node",
-+                        "config": "name"
-+                    }
-+                ),
-+                (
-+                    Severities.INFO,
-+                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    {
-+                        "node": None,
-+                        "name": "name",
-+                        "name_list": ["name"]
-+                    }
-+                )
-+            ]
-+        )
-+
-+    def test_invalid_input(self, mock_pull):
-+        mock_pull.return_value = {}
-+        assert_raise_library_error(
-+            lambda: commands.pull_config(self.mock_env, "node", "name"),
-+            (
-+                Severities.ERROR,
-+                report_codes.INVALID_RESPONSE_FORMAT,
-+                {"node": "node"}
-+            )
-+        )
-+        mock_pull.assert_called_once_with(
-+            self.mock_com, NodeAddresses("node"), "name"
-+        )
-+        self.assertEqual(0, self.mock_env.booth.create_config.call_count)
-+        self.assertEqual(0, self.mock_env.booth.set_key_path.call_count)
-+        self.assertEqual(0, self.mock_env.booth.create_key.call_count)
-+        assert_report_item_list_equal(
-+            self.mock_rep.report_item_list,
-+            [(
-+                Severities.INFO,
-+                report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
-+                {
-+                    "node": "node",
-+                    "config": "name"
-+                }
-+            )]
-+        )
-+
-+class TicketOperationTest(TestCase):
-+    @mock.patch("pcs.lib.booth.resource.find_bound_ip")
-+    def test_raises_when_implicit_site_not_found_in_cib(
-+        self, mock_find_bound_ip
-+    ):
-+        mock_find_bound_ip.return_value = []
-+        assert_raise_library_error(
-+            lambda: commands.ticket_operation(
-+                "grant", mock.Mock(), "booth", "ABC", site_ip=None
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP,
-+                {}
-+            ),
-+        )
-+
-+    def test_raises_when_command_fail(self):
-+        mock_run = mock.Mock(return_value=("some message", 1))
-+        mock_env = mock.MagicMock(
-+            cmd_runner=mock.Mock(return_value=mock.MagicMock(run=mock_run))
-+        )
-+        assert_raise_library_error(
-+            lambda: commands.ticket_operation(
-+                "grant", mock_env, "booth", "ABC", site_ip="1.2.3.4"
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_TICKET_OPERATION_FAILED,
-+                {
-+                    "operation": "grant",
-+                    "reason": "some message",
-+                    "site_ip": "1.2.3.4",
-+                    "ticket_name": "ABC",
-+                }
-+            ),
-+        )
-+
-+class CreateInClusterTest(TestCase):
-+    @patch_commands("get_resources", mock.MagicMock())
-+    def test_raises_when_is_created_already(self):
-+        assert_raise_library_error(
-+            lambda: commands.create_in_cluster(
-+                mock.MagicMock(), "somename", ip="1.2.3.4", resource_create=None
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_ALREADY_IN_CIB,
-+                {
-+                    "name": "somename",
-+                }
-+            ),
-+        )
-+
-+class RemoveFromClusterTest(TestCase):
-+    @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock(
-+        side_effect=booth_resource.BoothNotFoundInCib()
-+    )))
-+    def test_raises_when_no_booth_resource_found(self):
-+        assert_raise_library_error(
-+            lambda: commands.remove_from_cluster(
-+                mock.MagicMock(), "somename", resource_remove=None
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_NOT_EXISTS_IN_CIB,
-+                {
-+                    'name': 'somename',
-+                }
-+            ),
-+        )
-+
-+    @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock(
-+        side_effect=booth_resource.BoothMultipleOccurenceFoundInCib()
-+    )))
-+    def test_raises_when_multiple_booth_resource_found(self):
-+        assert_raise_library_error(
-+            lambda: commands.remove_from_cluster(
-+                mock.MagicMock(), "somename", resource_remove=None
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB,
-+                {
-+                    'name': 'somename',
-+                },
-+                report_codes.FORCE_BOOTH_REMOVE_FROM_CIB,
-+            ),
-+        )
-+
-+    @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock(
-+        return_value=2
-+    )))
-+    def test_warn_when_multiple_booth_resources_removed(self):
-+        report_processor=MockLibraryReportProcessor()
-+        commands.remove_from_cluster(
-+            mock.MagicMock(report_processor=report_processor),
-+            "somename",
-+            resource_remove=None
-+        )
-+        assert_report_item_list_equal(report_processor.report_item_list, [(
-+            Severities.WARNING,
-+            report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB,
-+            {
-+                'name': 'somename',
-+            },
-+        )])
-diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
-index 751001b..d8b8a5f 100644
---- a/pcs/lib/commands/test/test_ticket.py
-+++ b/pcs/lib/commands/test/test_ticket.py
-@@ -5,27 +5,22 @@ from __future__ import (
-     unicode_literals,
- )
- 
--import logging
- from unittest import TestCase
- 
- from pcs.common import report_codes
- from pcs.lib.commands.constraint import ticket as ticket_command
--from pcs.lib.env import LibraryEnvironment as Env
- from pcs.lib.errors import ReportItemSeverity as severities
-+from pcs.lib.test.misc import get_mocked_env
- from pcs.test.tools.assertions import (
-     assert_xml_equal,
-     assert_raise_library_error
- )
--from pcs.test.tools.custom_mock import MockLibraryReportProcessor
- from pcs.test.tools.misc import get_test_resource as rc
--from pcs.test.tools.pcs_mock import mock
- from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
- 
- 
- class CreateTest(TestCase):
-     def setUp(self):
--        self.mock_logger = mock.MagicMock(logging.Logger)
--        self.mock_reporter = MockLibraryReportProcessor()
-         self.create_cib = get_xml_manipulation_creator_from_file(
-             rc("cib-empty.xml")
-         )
-@@ -37,7 +32,7 @@ class CreateTest(TestCase):
-                 .append_to_first_tag_name('resources', resource_xml)
-         )
- 
--        env = Env(self.mock_logger, self.mock_reporter, cib_data=str(cib))
-+        env = get_mocked_env(cib_data=str(cib))
-         ticket_command.create(env, "ticketA", "resourceA", {
-             "loss-policy": "fence",
-             "rsc-role": "master"
-@@ -59,11 +54,7 @@ class CreateTest(TestCase):
-         )
- 
-     def test_refuse_for_nonexisting_resource(self):
--        env = Env(
--            self.mock_logger,
--            self.mock_reporter,
--            cib_data=str(self.create_cib())
--        )
-+        env = get_mocked_env(cib_data=str(self.create_cib()))
-         assert_raise_library_error(
-             lambda: ticket_command.create(
-                 env, "ticketA", "resourceA", "master", {"loss-policy": "fence"}
-diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
-index b49b9f6..1e68c31 100644
---- a/pcs/lib/corosync/live.py
-+++ b/pcs/lib/corosync/live.py
-@@ -22,6 +22,9 @@ def get_local_corosync_conf():
-     except IOError as e:
-         raise LibraryError(reports.corosync_config_read_error(path, e.strerror))
- 
-+def exists_local_corosync_conf():
-+    return os.path.exists(settings.corosync_conf_file)
-+
- def set_remote_corosync_conf(node_communicator, node_addr, config_text):
-     """
-     Send corosync.conf to a node
-diff --git a/pcs/lib/env.py b/pcs/lib/env.py
-index 24e4252..b139c58 100644
---- a/pcs/lib/env.py
-+++ b/pcs/lib/env.py
-@@ -5,20 +5,27 @@ from __future__ import (
-     unicode_literals,
- )
- 
-+import os.path
-+
- from lxml import etree
- 
-+from pcs import settings
- from pcs.lib import reports
-+from pcs.lib.booth.env import BoothEnv
-+from pcs.lib.cib.tools import ensure_cib_version
-+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
-+from pcs.lib.corosync.live import (
-+    exists_local_corosync_conf,
-+    get_local_corosync_conf,
-+    reload_config as reload_corosync_config,
-+)
- from pcs.lib.external import (
-     is_cman_cluster,
-     is_service_running,
-     CommandRunner,
-     NodeCommunicator,
- )
--from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
--from pcs.lib.corosync.live import (
--    get_local_corosync_conf,
--    reload_config as reload_corosync_config,
--)
-+from pcs.lib.errors import LibraryError
- from pcs.lib.nodes_task import (
-     distribute_corosync_conf,
-     check_corosync_offline_on_nodes,
-@@ -29,7 +36,6 @@ from pcs.lib.pacemaker import (
-     get_cib_xml,
-     replace_cib_configuration_xml,
- )
--from pcs.lib.cib.tools import ensure_cib_version
- 
- 
- class LibraryEnvironment(object):
-@@ -43,6 +49,7 @@ class LibraryEnvironment(object):
-         user_groups=None,
-         cib_data=None,
-         corosync_conf_data=None,
-+        booth=None,
-         auth_tokens_getter=None,
-     ):
-         self._logger = logger
-@@ -51,6 +58,9 @@ class LibraryEnvironment(object):
-         self._user_groups = [] if user_groups is None else user_groups
-         self._cib_data = cib_data
-         self._corosync_conf_data = corosync_conf_data
-+        self._booth = (
-+            BoothEnv(report_processor, booth) if booth is not None else None
-+        )
-         self._is_cman_cluster = None
-         # TODO tokens probably should not be inserted from outside, but we're
-         # postponing dealing with them, because it's not that easy to move
-@@ -169,6 +179,24 @@ class LibraryEnvironment(object):
-         else:
-             self._corosync_conf_data = corosync_conf_data
- 
-+    def is_node_in_cluster(self):
-+        if self.is_cman_cluster:
-+            #TODO --cluster_conf is not propagated here. So no live check not
-+            #needed here. But this should not be permanently
-+            return os.path.exists(settings.corosync_conf_file)
-+
-+        if not self.is_corosync_conf_live:
-+            raise AssertionError(
-+                "Cannot check if node is in cluster with mocked corosync_conf."
-+            )
-+        return exists_local_corosync_conf()
-+
-+    def command_expect_live_corosync_env(self):
-+        if not self.is_corosync_conf_live:
-+            raise LibraryError(reports.live_environment_required([
-+                "--corosync_conf"
-+            ]))
-+
-     @property
-     def is_corosync_conf_live(self):
-         return self._corosync_conf_data is None
-@@ -195,3 +223,7 @@ class LibraryEnvironment(object):
-             else:
-                 self._auth_tokens = {}
-         return self._auth_tokens
-+
-+    @property
-+    def booth(self):
-+        return self._booth
-diff --git a/pcs/lib/env_file.py b/pcs/lib/env_file.py
-new file mode 100644
-index 0000000..e683a57
---- /dev/null
-+++ b/pcs/lib/env_file.py
-@@ -0,0 +1,122 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os.path
-+
-+from pcs.common import report_codes
-+from pcs.common.tools import format_environment_error
-+from pcs.lib import reports
-+from pcs.lib.errors import ReportItemSeverity, LibraryError, LibraryEnvError
-+
-+
-+class GhostFile(object):
-+    is_live = False
-+    def __init__(self, file_role, content=None):
-+        self.__file_role = file_role
-+        self.__content = content
-+        self.__no_existing_file_expected = False
-+        self.__can_overwrite_existing_file = False
-+        self.__is_binary = False
-+
-+    def read(self):
-+        if self.__content is None:
-+            raise LibraryEnvError(
-+                reports.file_does_not_exist(self.__file_role)
-+            )
-+
-+        return self.__content
-+
-+    def remove(self, silence_no_existence):
-+        raise AssertionError("Remove GhostFile is not supported.")
-+
-+    def write(self, content, file_operation=None, is_binary=False):
-+        """
-+        callable file_operation is there only for RealFile compatible interface
-+            it has no efect
-+        """
-+        self.__is_binary = is_binary
-+        self.__content = content
-+
-+    def assert_no_conflict_with_existing(
-+        self, report_processor, can_overwrite_existing=False
-+    ):
-+        self.__no_existing_file_expected = True
-+        self.__can_overwrite_existing_file = can_overwrite_existing
-+
-+    def export(self):
-+        return {
-+            "content": self.__content,
-+            "no_existing_file_expected": self.__no_existing_file_expected,
-+            "can_overwrite_existing_file": self.__can_overwrite_existing_file,
-+            "is_binary": self.__is_binary,
-+        }
-+
-+
-+class RealFile(object):
-+    is_live = True
-+    def __init__(
-+        self, file_role, file_path,
-+        overwrite_code=report_codes.FORCE_FILE_OVERWRITE
-+    ):
-+        self.__file_role = file_role
-+        self.__file_path = file_path
-+        self.__overwrite_code = overwrite_code
-+
-+    def assert_no_conflict_with_existing(
-+        self, report_processor, can_overwrite_existing=False
-+    ):
-+        if os.path.exists(self.__file_path):
-+            report_processor.process(reports.file_already_exists(
-+                self.__file_role,
-+                self.__file_path,
-+                ReportItemSeverity.WARNING if can_overwrite_existing
-+                    else ReportItemSeverity.ERROR,
-+                forceable=None if can_overwrite_existing
-+                    else self.__overwrite_code,
-+            ))
-+
-+    def write(self, content, file_operation=None, is_binary=False):
-+        """
-+        callable file_operation takes path and proces operation on it e.g. chmod
-+        """
-+        mode = "wb" if is_binary else "w"
-+        try:
-+            with open(self.__file_path, mode) as config_file:
-+                config_file.write(content)
-+            if file_operation:
-+                file_operation(self.__file_path)
-+        except EnvironmentError as e:
-+            raise self.__report_io_error(e, "write")
-+
-+    def read(self):
-+        try:
-+            with open(self.__file_path, "r") as file:
-+                return file.read()
-+        except EnvironmentError as e:
-+            raise self.__report_io_error(e, "read")
-+
-+    def remove(self, silence_no_existence=False):
-+        if os.path.exists(self.__file_path):
-+            try:
-+                os.remove(self.__file_path)
-+            except EnvironmentError as e:
-+                raise self.__report_io_error(e, "remove")
-+        elif not silence_no_existence:
-+            raise LibraryError(reports.file_io_error(
-+                self.__file_role,
-+                file_path=self.__file_path,
-+                operation="remove",
-+                reason="File does not exist"
-+            ))
-+
-+    def __report_io_error(self, e, operation):
-+        return LibraryError(reports.file_io_error(
-+            self.__file_role,
-+            file_path=self.__file_path,
-+            operation=operation,
-+            reason=format_environment_error(e)
-+        ))
-diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py
-index 9cab5e9..0a8f4fa 100644
---- a/pcs/lib/errors.py
-+++ b/pcs/lib/errors.py
-@@ -8,6 +8,20 @@ from __future__ import (
- class LibraryError(Exception):
-     pass
- 
-+class LibraryEnvError(LibraryError):
-+    def __init__(self, *args, **kwargs):
-+        super(LibraryEnvError, self).__init__(*args, **kwargs)
-+        self.processed = []
-+
-+    def sign_processed(self, report):
-+        self.processed.append(report)
-+
-+    @property
-+    def unprocessed(self):
-+        return [report for report in self.args if report not in self.processed]
-+
-+
-+
- class ReportItemSeverity(object):
-     ERROR = 'ERROR'
-     WARNING = 'WARNING'
-diff --git a/pcs/lib/external.py b/pcs/lib/external.py
-index c773e5a..25e071f 100644
---- a/pcs/lib/external.py
-+++ b/pcs/lib/external.py
-@@ -59,9 +59,10 @@ from pcs import settings
- 
- class ManageServiceError(Exception):
-     #pylint: disable=super-init-not-called
--    def __init__(self, service, message=None):
-+    def __init__(self, service, message=None, instance=None):
-         self.service = service
-         self.message = message
-+        self.instance = instance
- 
- class DisableServiceError(ManageServiceError):
-     pass
-@@ -91,6 +92,22 @@ def is_dir_nonempty(path):
-     return len(os.listdir(path)) > 0
- 
- 
-+def _get_service_name(service, instance=None):
-+    return "{0}{1}.service".format(
-+        service, "" if instance is None else "@{0}".format(instance)
-+    )
-+
-+def ensure_is_systemd():
-+    """
-+    Ensure if current system is systemd system. Raises Library error if not.
-+    """
-+    if not is_systemctl():
-+        raise LibraryError(
-+            reports.unsupported_operation_on_non_systemd_systems()
-+        )
-+
-+
-+
- @simple_cache
- def is_systemctl():
-     """
-@@ -108,74 +125,82 @@ def is_systemctl():
-     return False
- 
- 
--def disable_service(runner, service):
-+def disable_service(runner, service, instance=None):
-     """
-     Disable specified service in local system.
-     Raise DisableServiceError or LibraryError on failure.
- 
-     runner -- CommandRunner
-     service -- name of service
-+    instance -- instance name, it ha no effect on not systemd systems.
-+        If None no instance name will be used.
-     """
-     if is_systemctl():
-         output, retval = runner.run([
--            "systemctl", "disable", service + ".service"
-+            "systemctl", "disable", _get_service_name(service, instance)
-         ])
-     else:
-         if not is_service_installed(runner, service):
-             return
-         output, retval = runner.run(["chkconfig", service, "off"])
-     if retval != 0:
--        raise DisableServiceError(service, output.rstrip())
-+        raise DisableServiceError(service, output.rstrip(), instance)
- 
- 
--def enable_service(runner, service):
-+def enable_service(runner, service, instance=None):
-     """
-     Enable specified service in local system.
-     Raise EnableServiceError or LibraryError on failure.
- 
-     runner -- CommandRunner
-     service -- name of service
-+    instance -- instance name, it ha no effect on not systemd systems.
-+        If None no instance name will be used.
-     """
-     if is_systemctl():
-         output, retval = runner.run([
--            "systemctl", "enable", service + ".service"
-+            "systemctl", "enable", _get_service_name(service, instance)
-         ])
-     else:
-         output, retval = runner.run(["chkconfig", service, "on"])
-     if retval != 0:
--        raise EnableServiceError(service, output.rstrip())
-+        raise EnableServiceError(service, output.rstrip(), instance)
- 
- 
--def start_service(runner, service):
-+def start_service(runner, service, instance=None):
-     """
-     Start specified service in local system
-     CommandRunner runner
-     string service service name
-+    string instance instance name, it ha no effect on not systemd systems.
-+        If None no instance name will be used.
-     """
-     if is_systemctl():
-         output, retval = runner.run([
--            "systemctl", "start", "{0}.service".format(service)
-+            "systemctl", "start", _get_service_name(service, instance)
-         ])
-     else:
-         output, retval = runner.run(["service", service, "start"])
-     if retval != 0:
--        raise StartServiceError(service, output.rstrip())
-+        raise StartServiceError(service, output.rstrip(), instance)
- 
- 
--def stop_service(runner, service):
-+def stop_service(runner, service, instance=None):
-     """
-     Stop specified service in local system
-     CommandRunner runner
-     string service service name
-+    string instance instance name, it ha no effect on not systemd systems.
-+        If None no instance name will be used.
-     """
-     if is_systemctl():
-         output, retval = runner.run([
--            "systemctl", "stop", "{0}.service".format(service)
-+            "systemctl", "stop", _get_service_name(service, instance)
-         ])
-     else:
-         output, retval = runner.run(["service", service, "stop"])
-     if retval != 0:
--        raise StopServiceError(service, output.rstrip())
-+        raise StopServiceError(service, output.rstrip(), instance)
- 
- 
- def kill_services(runner, services):
-@@ -196,7 +221,7 @@ def kill_services(runner, services):
-             raise KillServicesError(list(services), output.rstrip())
- 
- 
--def is_service_enabled(runner, service):
-+def is_service_enabled(runner, service, instance=None):
-     """
-     Check if specified service is enabled in local system.
- 
-@@ -205,7 +230,7 @@ def is_service_enabled(runner, service):
-     """
-     if is_systemctl():
-         _, retval = runner.run(
--            ["systemctl", "is-enabled", service + ".service"]
-+            ["systemctl", "is-enabled", _get_service_name(service, instance)]
-         )
-     else:
-         _, retval = runner.run(["chkconfig", service])
-@@ -213,7 +238,7 @@ def is_service_enabled(runner, service):
-     return retval == 0
- 
- 
--def is_service_running(runner, service):
-+def is_service_running(runner, service, instance=None):
-     """
-     Check if specified service is currently running on local system.
- 
-@@ -221,7 +246,11 @@ def is_service_running(runner, service):
-     service -- name of service
-     """
-     if is_systemctl():
--        _, retval = runner.run(["systemctl", "is-active", service + ".service"])
-+        _, retval = runner.run([
-+            "systemctl",
-+            "is-active",
-+            _get_service_name(service, instance)
-+        ])
-     else:
-         _, retval = runner.run(["service", service, "status"])
- 
-@@ -314,6 +343,9 @@ class CommandRunner(object):
-         self, args, ignore_stderr=False, stdin_string=None, env_extend=None,
-         binary_output=False
-     ):
-+        #Reset environment variables by empty dict is desired here.  We need to
-+        #get rid of defaults - we do not know the context and environment of the
-+        #library.  So executable must be specified with full path.
-         env_vars = dict(env_extend) if env_extend else dict()
-         env_vars.update(self._env_vars)
- 
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index fc2670b..eac95c7 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -1153,27 +1153,37 @@ def cman_broadcast_all_rings():
-             + "broadcast in only one ring"
-     )
- 
--def service_start_started(service):
-+def service_start_started(service, instance=None):
-     """
-     system service is being started
-     string service service name or description
-+    string instance instance of service
-     """
-+    if instance:
-+        msg = "Starting {service}@{instance}..."
-+    else:
-+        msg = "Starting {service}..."
-     return ReportItem.info(
-         report_codes.SERVICE_START_STARTED,
--        "Starting {service}...",
-+        msg,
-         info={
-             "service": service,
-+            "instance": instance,
-         }
-     )
- 
--def service_start_error(service, reason, node=None):
-+def service_start_error(service, reason, node=None, instance=None):
-     """
-     system service start failed
-     string service service name or description
-     string reason error message
-     string node node on which service has been requested to start
-+    string instance instance of service
-     """
--    msg = "Unable to start {service}: {reason}"
-+    if instance:
-+        msg = "Unable to start {service}@{instance}: {reason}"
-+    else:
-+        msg = "Unable to start {service}: {reason}"
-     return ReportItem.error(
-         report_codes.SERVICE_START_ERROR,
-         msg if node is None else "{node}: " + msg,
-@@ -1181,33 +1191,43 @@ def service_start_error(service, reason, node=None):
-             "service": service,
-             "reason": reason,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
--def service_start_success(service, node=None):
-+def service_start_success(service, node=None, instance=None):
-     """
-     system service was started successfully
-     string service service name or description
-     string node node on which service has been requested to start
-+    string instance instance of service
-     """
--    msg = "{service} started"
-+    if instance:
-+        msg = "{service}@{instance} started"
-+    else:
-+        msg = "{service} started"
-     return ReportItem.info(
-         report_codes.SERVICE_START_SUCCESS,
-         msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
--def service_start_skipped(service, reason, node=None):
-+def service_start_skipped(service, reason, node=None, instance=None):
-     """
-     starting system service was skipped, no error occured
-     string service service name or description
-     string reason why the start has been skipped
-     string node node on which service has been requested to start
-+    string instance instance of service
-     """
--    msg = "not starting {service} - {reason}"
-+    if instance:
-+        msg = "not starting {service}@{instance} - {reason}"
-+    else:
-+        msg = "not starting {service} - {reason}"
-     return ReportItem.info(
-         report_codes.SERVICE_START_SKIPPED,
-         msg if node is None else "{node}: " + msg,
-@@ -1215,30 +1235,41 @@ def service_start_skipped(service, reason, node=None):
-             "service": service,
-             "reason": reason,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
--def service_stop_started(service):
-+def service_stop_started(service, instance=None):
-     """
-     system service is being stopped
-     string service service name or description
-+    string instance instance of service
-     """
-+    if instance:
-+        msg = "Stopping {service}@{instance}..."
-+    else:
-+        msg = "Stopping {service}..."
-     return ReportItem.info(
-         report_codes.SERVICE_STOP_STARTED,
--        "Stopping {service}...",
-+        msg,
-         info={
-             "service": service,
-+            "instance": instance,
-         }
-     )
- 
--def service_stop_error(service, reason, node=None):
-+def service_stop_error(service, reason, node=None, instance=None):
-     """
-     system service stop failed
-     string service service name or description
-     string reason error message
-     string node node on which service has been requested to stop
-+    string instance instance of service
-     """
--    msg = "Unable to stop {service}: {reason}"
-+    if instance:
-+        msg = "Unable to stop {service}@{instance}: {reason}"
-+    else:
-+        msg = "Unable to stop {service}: {reason}"
-     return ReportItem.error(
-         report_codes.SERVICE_STOP_ERROR,
-         msg if node is None else "{node}: " + msg,
-@@ -1246,22 +1277,28 @@ def service_stop_error(service, reason, node=None):
-             "service": service,
-             "reason": reason,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
--def service_stop_success(service, node=None):
-+def service_stop_success(service, node=None, instance=None):
-     """
-     system service was stopped successfully
-     string service service name or description
-     string node node on which service has been requested to stop
-+    string instance instance of service
-     """
--    msg = "{service} stopped"
-+    if instance:
-+        msg = "{service}@{instance} stopped"
-+    else:
-+        msg = "{service} stopped"
-     return ReportItem.info(
-         report_codes.SERVICE_STOP_SUCCESS,
-         msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
-@@ -1295,27 +1332,37 @@ def service_kill_success(services):
-         }
-     )
- 
--def service_enable_started(service):
-+def service_enable_started(service, instance=None):
-     """
-     system service is being enabled
-     string service service name or description
-+    string instance instance of service
-     """
-+    if instance:
-+        msg = "Enabling {service}@{instance}..."
-+    else:
-+        msg = "Enabling {service}..."
-     return ReportItem.info(
-         report_codes.SERVICE_ENABLE_STARTED,
--        "Enabling {service}...",
-+        msg,
-         info={
-             "service": service,
-+            "instance": instance,
-         }
-     )
- 
--def service_enable_error(service, reason, node=None):
-+def service_enable_error(service, reason, node=None, instance=None):
-     """
-     system service enable failed
-     string service service name or description
-     string reason error message
-     string node node on which service was enabled
-+    string instance instance of service
-     """
--    msg = "Unable to enable {service}: {reason}"
-+    if instance:
-+        msg = "Unable to enable {service}@{instance}: {reason}"
-+    else:
-+        msg = "Unable to enable {service}: {reason}"
-     return ReportItem.error(
-         report_codes.SERVICE_ENABLE_ERROR,
-         msg if node is None else "{node}: " + msg,
-@@ -1323,33 +1370,43 @@ def service_enable_error(service, reason, node=None):
-             "service": service,
-             "reason": reason,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
--def service_enable_success(service, node=None):
-+def service_enable_success(service, node=None, instance=None):
-     """
-     system service was enabled successfully
-     string service service name or description
-     string node node on which service has been enabled
-+    string instance instance of service
-     """
--    msg = "{service} enabled"
-+    if instance:
-+        msg = "{service}@{instance} enabled"
-+    else:
-+        msg = "{service} enabled"
-     return ReportItem.info(
-         report_codes.SERVICE_ENABLE_SUCCESS,
-         msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
--def service_enable_skipped(service, reason, node=None):
-+def service_enable_skipped(service, reason, node=None, instance=None):
-     """
-     enabling system service was skipped, no error occured
-     string service service name or description
-     string reason why the enabling has been skipped
-     string node node on which service has been requested to enable
-+    string instance instance of service
-     """
--    msg = "not enabling {service} - {reason}"
-+    if instance:
-+        msg = "not enabling {service}@{instance} - {reason}"
-+    else:
-+        msg = "not enabling {service} - {reason}"
-     return ReportItem.info(
-         report_codes.SERVICE_ENABLE_SKIPPED,
-         msg if node is None else "{node}: " + msg,
-@@ -1357,30 +1414,41 @@ def service_enable_skipped(service, reason, node=None):
-             "service": service,
-             "reason": reason,
-             "node": node,
-+            "instance": instance
-         }
-     )
- 
--def service_disable_started(service):
-+def service_disable_started(service, instance=None):
-     """
-     system service is being disabled
-     string service service name or description
-+    string instance instance of service
-     """
-+    if instance:
-+        msg = "Disabling {service}@{instance}..."
-+    else:
-+        msg = "Disabling {service}..."
-     return ReportItem.info(
-         report_codes.SERVICE_DISABLE_STARTED,
--        "Disabling {service}...",
-+        msg,
-         info={
-             "service": service,
-+            "instance": instance,
-         }
-     )
- 
--def service_disable_error(service, reason, node=None):
-+def service_disable_error(service, reason, node=None, instance=None):
-     """
-     system service disable failed
-     string service service name or description
-     string reason error message
-     string node node on which service was disabled
-+    string instance instance of service
-     """
--    msg = "Unable to disable {service}: {reason}"
-+    if instance:
-+        msg = "Unable to disable {service}@{instance}: {reason}"
-+    else:
-+        msg = "Unable to disable {service}: {reason}"
-     return ReportItem.error(
-         report_codes.SERVICE_DISABLE_ERROR,
-         msg if node is None else "{node}: " + msg,
-@@ -1388,22 +1456,28 @@ def service_disable_error(service, reason, node=None):
-             "service": service,
-             "reason": reason,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
--def service_disable_success(service, node=None):
-+def service_disable_success(service, node=None, instance=None):
-     """
-     system service was disabled successfully
-     string service service name or description
-     string node node on which service was disabled
-+    string instance instance of service
-     """
--    msg = "{service} disabled"
-+    if instance:
-+        msg = "{service}@{instance} disabled"
-+    else:
-+        msg = "{service} disabled"
-     return ReportItem.info(
-         report_codes.SERVICE_DISABLE_SUCCESS,
-         msg if node is None else "{node}: " + msg,
-         info={
-             "service": service,
-             "node": node,
-+            "instance": instance,
-         }
-     )
- 
-@@ -1742,3 +1816,88 @@ def unable_to_upgrade_cib_to_required_version(
-             "current_version": "{0}.{1}.{2}".format(*current_version)
-         }
-     )
-+
-+def file_already_exists(
-+        file_role, file_path, severity=ReportItemSeverity.ERROR,
-+        forceable=None, node=None
-+    ):
-+    msg = "file {file_path} already exists"
-+    if file_role:
-+        msg = "{file_role} " + msg
-+    if node:
-+        msg = "{node}: " + msg
-+    return ReportItem(
-+        report_codes.FILE_ALREADY_EXISTS,
-+        severity,
-+        msg,
-+        info={
-+            "file_role": file_role,
-+            "file_path": file_path,
-+            "node": node,
-+        },
-+        forceable=forceable,
-+    )
-+
-+def file_does_not_exist(file_role, file_path=""):
-+    return ReportItem.error(
-+        report_codes.FILE_DOES_NOT_EXIST,
-+        "{file_role} file {file_path} does not exist",
-+        info={
-+            "file_role": file_role,
-+            "file_path": file_path,
-+        },
-+    )
-+
-+def file_io_error(
-+    file_role, file_path="", reason="", operation="work with",
-+    severity=ReportItemSeverity.ERROR
-+):
-+    if file_path:
-+        msg = "unable to {operation} {file_role} '{file_path}': {reason}"
-+    else:
-+        msg = "unable to {operation} {file_role}: {reason}"
-+    return ReportItem(
-+        report_codes.FILE_IO_ERROR,
-+        severity,
-+        msg,
-+        info={
-+            "file_role": file_role,
-+            "file_path": file_path,
-+            "reason": reason,
-+            "operation": operation
-+        },
-+    )
-+
-+def unable_to_determine_user_uid(user):
-+    return ReportItem.error(
-+        report_codes.UNABLE_TO_DETERMINE_USER_UID,
-+        "Unable to determine uid of user '{user}'",
-+        info={
-+            "user": user
-+        }
-+    )
-+
-+def unable_to_determine_group_gid(group):
-+    return ReportItem.error(
-+        report_codes.UNABLE_TO_DETERMINE_GROUP_GID,
-+        "Unable to determine gid of group '{group}'",
-+        info={
-+            "group": group
-+        }
-+    )
-+
-+def unsupported_operation_on_non_systemd_systems():
-+    return ReportItem.error(
-+        report_codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS,
-+        "unsupported operation on non systemd systems"
-+    )
-+
-+def live_environment_required(forbidden_options):
-+    return ReportItem.error(
-+        report_codes.LIVE_ENVIRONMENT_REQUIRED,
-+        "This command does not support {options_string}",
-+        info={
-+            "forbidden_options": forbidden_options,
-+            "options_string": ", ".join(forbidden_options),
-+        }
-+    )
-diff --git a/pcs/lib/test/misc.py b/pcs/lib/test/misc.py
-new file mode 100644
-index 0000000..1b1670a
---- /dev/null
-+++ b/pcs/lib/test/misc.py
-@@ -0,0 +1,20 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import logging
-+
-+from pcs.lib.env import LibraryEnvironment as Env
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-+from pcs.test.tools.pcs_mock import mock
-+
-+
-+def get_mocked_env(**kwargs):
-+    return Env(
-+        logger=mock.MagicMock(logging.Logger),
-+        report_processor=MockLibraryReportProcessor(),
-+        **kwargs
-+    )
-diff --git a/pcs/lib/test/test_env_file.py b/pcs/lib/test/test_env_file.py
-new file mode 100644
-index 0000000..3e27af1
---- /dev/null
-+++ b/pcs/lib/test/test_env_file.py
-@@ -0,0 +1,187 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from pcs.common import report_codes
-+from pcs.lib.env_file import RealFile, GhostFile
-+from pcs.lib.errors import ReportItemSeverity as severities
-+from pcs.test.tools.assertions import(
-+    assert_raise_library_error,
-+    assert_report_item_list_equal
-+)
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-+from pcs.test.tools.pcs_mock import mock
-+
-+
-+class GhostFileReadTest(TestCase):
-+    def test_raises_when_trying_read_nonexistent_file(self):
-+        assert_raise_library_error(
-+            lambda: GhostFile("some role", content=None).read(),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_DOES_NOT_EXIST,
-+                {
-+                    "file_role": "some role",
-+                }
-+            ),
-+        )
-+
-+@mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
-+class RealFileAssertNoConflictWithExistingTest(TestCase):
-+    def check(self, report_processor, can_overwrite_existing=False):
-+        real_file = RealFile("some role", "/etc/booth/some-name.conf")
-+        real_file.assert_no_conflict_with_existing(
-+            report_processor,
-+            can_overwrite_existing
-+        )
-+
-+    def test_success_when_config_not_exists(self, mock_exists):
-+        mock_exists.return_value = False
-+        report_processor=MockLibraryReportProcessor()
-+        self.check(report_processor)
-+        assert_report_item_list_equal(report_processor.report_item_list, [])
-+
-+    def test_raises_when_config_exists_and_overwrite_not_allowed(self, mock_ex):
-+        assert_raise_library_error(
-+            lambda: self.check(MockLibraryReportProcessor()),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_ALREADY_EXISTS,
-+                {
-+                    "file_path": "/etc/booth/some-name.conf"
-+                },
-+                report_codes.FORCE_FILE_OVERWRITE,
-+            ),
-+        )
-+
-+    def test_warn_when_config_exists_and_overwrite_allowed(self, mock_exists):
-+        report_processor=MockLibraryReportProcessor()
-+        self.check(report_processor, can_overwrite_existing=True)
-+        assert_report_item_list_equal(report_processor.report_item_list, [(
-+            severities.WARNING,
-+            report_codes.FILE_ALREADY_EXISTS,
-+            {
-+                "file_path": "/etc/booth/some-name.conf"
-+            },
-+        )])
-+
-+class RealFileWriteTest(TestCase):
-+    def test_success_write_content_to_path(self):
-+        mock_open = mock.mock_open()
-+        mock_file_operation = mock.Mock()
-+        with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
-+            RealFile("some role", "/etc/booth/some-name.conf").write(
-+                "config content",
-+                file_operation=mock_file_operation
-+            )
-+            mock_open.assert_called_once_with("/etc/booth/some-name.conf", "w")
-+            mock_open().write.assert_called_once_with("config content")
-+            mock_file_operation.assert_called_once_with(
-+                "/etc/booth/some-name.conf"
-+            )
-+
-+    def test_success_binary(self):
-+        mock_open = mock.mock_open()
-+        mock_file_operation = mock.Mock()
-+        with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
-+            RealFile("some role", "/etc/booth/some-name.conf").write(
-+                "config content".encode("utf-8"),
-+                file_operation=mock_file_operation,
-+                is_binary=True
-+            )
-+            mock_open.assert_called_once_with("/etc/booth/some-name.conf", "wb")
-+            mock_open().write.assert_called_once_with(
-+                "config content".encode("utf-8")
-+            )
-+            mock_file_operation.assert_called_once_with(
-+                "/etc/booth/some-name.conf"
-+            )
-+
-+    def test_raises_when_could_not_write(self):
-+        assert_raise_library_error(
-+            lambda:
-+            RealFile("some role", "/no/existing/file.path").write(["content"]),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_IO_ERROR,
-+                {
-+                    "reason":
-+                        "No such file or directory: '/no/existing/file.path'"
-+                    ,
-+                }
-+            )
-+        )
-+
-+class RealFileReadTest(TestCase):
-+    def test_success_read_content_from_file(self):
-+        mock_open = mock.mock_open()
-+        with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
-+            mock_open().read.return_value = "test booth\nconfig"
-+            self.assertEqual(
-+                "test booth\nconfig",
-+                RealFile("some role", "/path/to.file").read()
-+            )
-+
-+    def test_raises_when_could_not_read(self):
-+        assert_raise_library_error(
-+            lambda: RealFile("some role", "/no/existing/file.path").read(),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_IO_ERROR,
-+                {
-+                    "reason":
-+                        "No such file or directory: '/no/existing/file.path'"
-+                    ,
-+                }
-+            )
-+        )
-+
-+class RealFileRemoveTest(TestCase):
-+    @mock.patch("pcs.lib.env_file.os.remove")
-+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
-+    def test_success_remove_file(self, _, mock_remove):
-+        RealFile("some role", "/path/to.file").remove()
-+        mock_remove.assert_called_once_with("/path/to.file")
-+
-+    @mock.patch(
-+        "pcs.lib.env_file.os.remove",
-+        side_effect=EnvironmentError(1, "mock remove failed", "/path/to.file")
-+    )
-+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
-+    def test_raise_library_error_when_remove_failed(self, _, dummy):
-+        assert_raise_library_error(
-+            lambda: RealFile("some role", "/path/to.file").remove(),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_IO_ERROR,
-+                {
-+                    'reason': "mock remove failed: '/path/to.file'",
-+                    'file_role': 'some role',
-+                    'file_path': '/path/to.file'
-+                }
-+            )
-+        )
-+
-+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False)
-+    def test_existence_is_required(self, _):
-+        assert_raise_library_error(
-+            lambda: RealFile("some role", "/path/to.file").remove(),
-+            (
-+                severities.ERROR,
-+                report_codes.FILE_IO_ERROR,
-+                {
-+                    'reason': "File does not exist",
-+                    'file_role': 'some role',
-+                    'file_path': '/path/to.file'
-+                }
-+            )
-+        )
-+
-+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False)
-+    def test_noexistent_can_be_silenced(self, _):
-+        RealFile("some role", "/path/to.file").remove(silence_no_existence=True)
-diff --git a/pcs/lib/test/test_errors.py b/pcs/lib/test/test_errors.py
-new file mode 100644
-index 0000000..2e99e19
---- /dev/null
-+++ b/pcs/lib/test/test_errors.py
-@@ -0,0 +1,20 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from pcs.lib.errors import LibraryEnvError
-+
-+
-+class LibraryEnvErrorTest(TestCase):
-+    def test_can_sign_solved_reports(self):
-+        e = LibraryEnvError("first", "second", "third")
-+        for report in e.args:
-+            if report == "second":
-+                e.sign_processed(report)
-+
-+        self.assertEqual(["first", "third"], e.unprocessed)
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 09c0235..52497a0 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -45,6 +45,9 @@ Manage quorum device provider on the local host.
- quorum
- Manage cluster quorum settings.
- .TP
-+booth
-+Manage booth (cluster ticket manager).
-+.TP
- status
- View cluster status.
- .TP
-@@ -573,6 +576,55 @@ Cancel waiting for all nodes when establishing quorum.  Useful in situations whe
- .TP
- update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
- Add/Change quorum options.  At least one option must be specified.  Options are documented in corosync's votequorum(5) man page.  Requires the cluster to be stopped.
-+.SS "booth"
-+.TP
-+setup sites <address> <address> [<address>...] [arbitrators <address> ...] [\fB\-\-force\fR]
-+Write new booth configuration with specified sites and arbitrators.  Total number of peers (sites and arbitrators) must be odd.  When the configuration file already exists, command fails unless \fB\-\-force\fR is specified.
-+.TP
-+destroy
-+Remove booth configuration files.
-+.TP
-+ticket add <ticket>
-+Add new ticket to the current configuration.
-+.TP
-+ticket remove <ticket>
-+Remove the specified ticket from the current configuration.
-+.TP
-+config
-+Show booth configuration.
-+.TP
-+create ip <address>
-+Make the cluster run booth service on the specified ip address as a cluster resource.  Typically this is used to run booth site.
-+.TP
-+remove
-+Remove booth resources created by the "pcs booth create" command.
-+.TP
-+ticket grant <ticket> [<site address>]
-+Grant the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.
-+.TP
-+ticket revoke <ticket> [<site address>]
-+Revoke the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.
-+.TP
-+status
-+Print current status of booth on the local node.
-+.TP
-+pull <node>
-+Pull booth configuration from the specified node.
-+.TP
-+sync [\fB\-\-skip\-offline\fR]
-+Send booth configuration from the local node to all nodes in the cluster.
-+.TP
-+enable
-+Enable booth arbitrator service.
-+.TP
-+disable
-+Disable booth arbitrator service.
-+.TP
-+start
-+Start booth arbitrator service.
-+.TP
-+stop
-+Stop booth arbitrator service.
- .SS "status"
- .TP
- [status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
-diff --git a/pcs/resource.py b/pcs/resource.py
-index a85f46f..66c743c 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -60,7 +60,10 @@ def resource_cmd(argv):
-             argv, with_clone=True
-         )
-         try:
--            resource_create(res_id, res_type, ra_values, op_values, meta_values, clone_opts)
-+            resource_create(
-+                res_id, res_type, ra_values, op_values, meta_values, clone_opts,
-+                group=utils.pcs_options.get("--group", None)
-+            )
-         except CmdLineInputError as e:
-             utils.exit_on_cmdline_input_errror(e, "resource", 'create')
-     elif (sub_cmd == "move"):
-@@ -437,7 +440,10 @@ def format_desc(indent, desc):
- 
- # Create a resource using cibadmin
- # ra_class, ra_type & ra_provider must all contain valid info
--def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[]):
-+def resource_create(
-+    ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[],
-+    group=None
-+):
-     if "--wait" in utils.pcs_options:
-         wait_timeout = utils.validate_wait_get_timeout()
-         if "--disabled" in utils.pcs_options:
-@@ -588,7 +594,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
- 
-     if "--clone" in utils.pcs_options or len(clone_opts) > 0:
-         dom, dummy_clone_id = resource_clone_create(dom, [ra_id] + clone_opts)
--        if "--group" in utils.pcs_options:
-+        if group:
-             print("Warning: --group ignored when creating a clone")
-         if "--master" in utils.pcs_options:
-             print("Warning: --master ignored when creating a clone")
-@@ -596,11 +602,10 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
-         dom, dummy_master_id = resource_master_create(
-             dom, [ra_id] + master_meta_values
-         )
--        if "--group" in utils.pcs_options:
-+        if group:
-             print("Warning: --group ignored when creating a master")
--    elif "--group" in utils.pcs_options:
--        groupname = utils.pcs_options["--group"]
--        dom = resource_group_add(dom, groupname, [ra_id])
-+    elif group:
-+        dom = resource_group_add(dom, group, [ra_id])
- 
-     utils.replace_cib_configuration(dom)
- 
-diff --git a/pcs/settings_default.py b/pcs/settings_default.py
-index 15421fd..86913bf 100644
---- a/pcs/settings_default.py
-+++ b/pcs/settings_default.py
-@@ -41,3 +41,5 @@ nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/"
- sbd_watchdog_default = "/dev/watchdog"
- sbd_config = "/etc/sysconfig/sbd"
- pacemaker_wait_timeout_status = 62
-+booth_config_dir = "/etc/booth"
-+booth_binary = "/usr/sbin/booth"
-diff --git a/pcs/stonith.py b/pcs/stonith.py
-index c02f35a..93332ef 100644
---- a/pcs/stonith.py
-+++ b/pcs/stonith.py
-@@ -174,7 +174,8 @@ def stonith_create(argv):
-         utils.process_library_reports(e.args)
- 
-     resource.resource_create(
--        stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values
-+        stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values,
-+        group=utils.pcs_options.get("--group", None)
-     )
- 
- def stonith_level(argv):
-diff --git a/pcs/test/resources/.gitignore b/pcs/test/resources/.gitignore
-index 8c710cf..b0434e7 100644
---- a/pcs/test/resources/.gitignore
-+++ b/pcs/test/resources/.gitignore
-@@ -1,2 +1,3 @@
- *.tmp
- temp*.xml
-+temp-*
-diff --git a/pcs/test/resources/tmp_keyfile b/pcs/test/resources/tmp_keyfile
-new file mode 100644
-index 0000000..6b584e8
---- /dev/null
-+++ b/pcs/test/resources/tmp_keyfile
-@@ -0,0 +1 @@
-+content
-\ No newline at end of file
-diff --git a/pcs/test/suite.py b/pcs/test/suite.py
-index 5b29918..b6c7be2 100755
---- a/pcs/test/suite.py
-+++ b/pcs/test/suite.py
-@@ -9,19 +9,12 @@ from __future__ import (
- import sys
- import os.path
- 
--major, minor = sys.version_info[:2]
--if major == 2 and minor == 6:
--    import unittest2 as unittest
--else:
--    import unittest
--
--
- PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
-     os.path.abspath(__file__)
- )))
-+sys.path.insert(0, PACKAGE_DIR)
- 
--def put_package_to_path():
--    sys.path.insert(0, PACKAGE_DIR)
-+from pcs.test.tools import pcs_unittest as unittest
- 
- def prepare_test_name(test_name):
-     """
-@@ -65,18 +58,17 @@ def discover_tests(explicitly_enumerated_tests, exclude_enumerated_tests=False):
-     return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests)
- 
- def run_tests(tests, verbose=False, color=False):
--    resultclass = unittest.runner.TextTestResult
-+    resultclass = unittest.TextTestResult
-     if color:
-         from pcs.test.tools.color_text_runner import ColorTextTestResult
-         resultclass = ColorTextTestResult
- 
--    testRunner = unittest.runner.TextTestRunner(
-+    testRunner = unittest.TextTestRunner(
-         verbosity=2 if verbose else 1,
-         resultclass=resultclass
-     )
-     return testRunner.run(tests)
- 
--put_package_to_path()
- explicitly_enumerated_tests = [
-     prepare_test_name(arg) for arg in sys.argv[1:] if arg not in (
-         "-v",
-diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
-index bb61600..f6ea70d 100644
---- a/pcs/test/test_alert.py
-+++ b/pcs/test/test_alert.py
-@@ -7,7 +7,6 @@ from __future__ import (
- )
- 
- import shutil
--import sys
- 
- from pcs.test.tools.misc import (
-     get_test_resource as rc,
-@@ -15,12 +14,7 @@ from pcs.test.tools.misc import (
- )
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.pcs_runner import PcsRunner
--
--major, minor = sys.version_info[:2]
--if major == 2 and minor == 6:
--    import unittest2 as unittest
--else:
--    import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- 
- old_cib = rc("cib-empty.xml")
-diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py
-new file mode 100644
-index 0000000..5ddc06d
---- /dev/null
-+++ b/pcs/test/test_booth.py
-@@ -0,0 +1,342 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import os
-+import shutil
-+
-+from pcs.test.tools import pcs_unittest as unittest
-+from pcs.test.tools.assertions import AssertPcsMixin, console_report
-+from pcs.test.tools.misc import get_test_resource as rc
-+from pcs.test.tools.pcs_runner import PcsRunner
-+from pcs import settings
-+
-+
-+EMPTY_CIB = rc("cib-empty.xml")
-+TEMP_CIB = rc("temp-cib.xml")
-+
-+BOOTH_CONFIG_FILE = rc("temp-booth.cfg")
-+BOOTH_KEY_FILE = rc("temp-booth.key")
-+
-+BOOTH_RESOURCE_AGENT_INSTALLED = "booth-site" in os.listdir(
-+    os.path.join(settings.ocf_resources, "pacemaker")
-+)
-+need_booth_resource_agent = unittest.skipUnless(
-+    BOOTH_RESOURCE_AGENT_INSTALLED,
-+    "test requires resource agent ocf:pacemaker:booth-site"
-+    " which is not istalled"
-+)
-+
-+
-+def fake_file(command):
-+    return "{0} --booth-conf={1} --booth-key={2}".format(
-+        command,
-+        BOOTH_CONFIG_FILE,
-+        BOOTH_KEY_FILE,
-+    )
-+
-+def ensure_booth_config_exists():
-+    if not os.path.exists(BOOTH_CONFIG_FILE):
-+        with open(BOOTH_CONFIG_FILE, "w") as config_file:
-+            config_file.write("")
-+
-+def ensure_booth_config_not_exists():
-+    if os.path.exists(BOOTH_CONFIG_FILE):
-+        os.remove(BOOTH_CONFIG_FILE)
-+    if os.path.exists(BOOTH_KEY_FILE):
-+        os.remove(BOOTH_KEY_FILE)
-+
-+class BoothMixin(AssertPcsMixin):
-+    def setUp(self):
-+        shutil.copy(EMPTY_CIB, TEMP_CIB)
-+        self.pcs_runner = PcsRunner(TEMP_CIB)
-+
-+    def assert_pcs_success(self, command, *args, **kwargs):
-+        return super(BoothMixin, self).assert_pcs_success(
-+            fake_file(command), *args, **kwargs
-+        )
-+
-+    def assert_pcs_fail(self, command, *args, **kwargs):
-+        return super(BoothMixin, self).assert_pcs_fail(
-+            fake_file(command), *args, **kwargs
-+        )
-+
-+    def assert_pcs_fail_original(self, *args, **kwargs):
-+        return super(BoothMixin, self).assert_pcs_fail(*args, **kwargs)
-+
-+class SetupTest(BoothMixin, unittest.TestCase):
-+    def test_sucess_setup_booth_config(self):
-+        ensure_booth_config_not_exists()
-+        self.assert_pcs_success(
-+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
-+        )
-+        self.assert_pcs_success(
-+            "booth config",
-+            stdout_full=console_report(
-+                "site = 1.1.1.1",
-+                "site = 2.2.2.2",
-+                "arbitrator = 3.3.3.3",
-+                "authfile = {0}".format(BOOTH_KEY_FILE),
-+            )
-+        )
-+        with open(BOOTH_KEY_FILE) as key_file:
-+            self.assertEqual(64, len(key_file.read()))
-+
-+
-+    def test_fail_when_config_exists_already(self):
-+        ensure_booth_config_exists()
-+        try:
-+            self.assert_pcs_fail(
-+                "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3",
-+                (
-+                    "Error: booth config file {0} already exists, use --force"
-+                    " to override\n"
-+                ).format(BOOTH_CONFIG_FILE)
-+            )
-+        finally:
-+            if os.path.exists(BOOTH_CONFIG_FILE):
-+                os.remove(BOOTH_CONFIG_FILE)
-+
-+    def test_warn_when_config_file_exists_already_but_is_forced(self):
-+        ensure_booth_config_exists()
-+        self.assert_pcs_success(
-+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3 --force",
-+            stdout_full=[
-+                "Warning: booth config file"
-+                    " {0} already exists".format(BOOTH_CONFIG_FILE)
-+                ,
-+                "Warning: booth key file"
-+                    " {0} already exists".format(BOOTH_KEY_FILE)
-+                ,
-+            ]
-+        )
-+        ensure_booth_config_not_exists()
-+
-+
-+    def test_fail_on_multiple_reasons(self):
-+        self.assert_pcs_fail(
-+            "booth setup sites 1.1.1.1 arbitrators 1.1.1.1 2.2.2.2 3.3.3.3",
-+            console_report(
-+                "Error: lack of sites for booth configuration (need 2 at least)"
-+                    ": sites 1.1.1.1"
-+                ,
-+                "Error: odd number of peers is required (entered 4 peers)",
-+                "Error: duplicate address for booth configuration: 1.1.1.1",
-+            )
-+        )
-+
-+    def test_refuse_partialy_mocked_environment(self):
-+        self.assert_pcs_fail_original(
-+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
-+                " --booth-conf=/some/file" #no --booth-key!
-+            ,
-+            "Error: With --booth-conf must be specified --booth-key as well\n"
-+        )
-+        self.assert_pcs_fail_original(
-+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
-+                " --booth-key=/some/file" #no --booth-conf!
-+            ,
-+            "Error: With --booth-key must be specified --booth-conf as well\n"
-+        )
-+
-+    def test_show_usage_when_no_site_specified(self):
-+        self.assert_pcs_fail("booth setup arbitrators 3.3.3.3", stdout_start=[
-+            "",
-+            "Usage: pcs booth <command>"
-+        ])
-+
-+
-+class DestroyTest(BoothMixin, unittest.TestCase):
-+    def test_failed_when_using_mocked_booth_env(self):
-+        self.assert_pcs_fail(
-+            "booth destroy",
-+            "Error: This command does not support --booth-conf, --booth-key\n"
-+        )
-+
-+    @need_booth_resource_agent
-+    def test_failed_when_booth_in_cib(self):
-+        ensure_booth_config_not_exists()
-+        name = " --name=some-weird-booth-name"
-+        self.assert_pcs_success(
-+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" + name
-+        )
-+        self.assert_pcs_success("booth create ip 1.1.1.1" + name)
-+        self.assert_pcs_fail_original(
-+            "booth destroy" + name,
-+            #If there is booth@some-weird-booth-name in systemd (enabled or
-+            #started) the message continue with it because destroy command works
-+            #always on live environment. "Cleaner" solution takes more effort
-+            #than what it's worth
-+            stdout_start=(
-+                "Error: booth instance 'some-weird-booth-name' is used in"
-+                " cluster resource\n"
-+            ),
-+        )
-+
-+class BoothTest(unittest.TestCase, BoothMixin):
-+    def setUp(self):
-+        shutil.copy(EMPTY_CIB, TEMP_CIB)
-+        self.pcs_runner = PcsRunner(TEMP_CIB)
-+        ensure_booth_config_not_exists()
-+        self.assert_pcs_success(
-+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
-+        )
-+
-+class AddTicketTest(BoothTest):
-+    def test_success_add_ticket(self):
-+        self.assert_pcs_success("booth ticket add TicketA")
-+        self.assert_pcs_success("booth config", stdout_full=console_report(
-+            "site = 1.1.1.1",
-+            "site = 2.2.2.2",
-+            "arbitrator = 3.3.3.3",
-+            "authfile = {0}".format(BOOTH_KEY_FILE),
-+            'ticket = "TicketA"',
-+        ))
-+
-+    def test_fail_on_bad_ticket_name(self):
-+        self.assert_pcs_fail(
-+            "booth ticket add @TicketA",
-+            "Error: booth ticket name '@TicketA' is not valid, use alphanumeric"
-+            " chars or dash\n"
-+        )
-+
-+    def test_fail_on_duplicit_ticket_name(self):
-+        self.assert_pcs_success("booth ticket add TicketA")
-+        self.assert_pcs_fail(
-+            "booth ticket add TicketA",
-+            "Error: booth ticket name 'TicketA' already exists in configuration"
-+            "\n"
-+        )
-+
-+class RemoveTicketTest(BoothTest):
-+    def test_success_remove_ticket(self):
-+        self.assert_pcs_success("booth ticket add TicketA")
-+        self.assert_pcs_success("booth config", stdout_full=console_report(
-+            "site = 1.1.1.1",
-+            "site = 2.2.2.2",
-+            "arbitrator = 3.3.3.3",
-+            "authfile = {0}".format(BOOTH_KEY_FILE),
-+            'ticket = "TicketA"',
-+        ))
-+        self.assert_pcs_success("booth ticket remove TicketA")
-+        self.assert_pcs_success("booth config", stdout_full=console_report(
-+            "site = 1.1.1.1",
-+            "site = 2.2.2.2",
-+            "arbitrator = 3.3.3.3",
-+            "authfile = {0}".format(BOOTH_KEY_FILE),
-+        ))
-+
-+    def test_fail_when_ticket_does_not_exist(self):
-+        self.assert_pcs_fail(
-+            "booth ticket remove TicketA",
-+            "Error: booth ticket name 'TicketA' does not exist\n"
-+        )
-+
-+@need_booth_resource_agent
-+class CreateTest(BoothTest):
-+    def test_sucessfully_create_booth_resource_group(self):
-+        self.assert_pcs_success("resource show", "NO resources configured\n")
-+        self.assert_pcs_success("booth create ip 192.168.122.120")
-+        self.assert_pcs_success("resource show", [
-+             " Resource Group: booth-booth-group",
-+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
-+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
-+        ])
-+        self.assert_pcs_success("resource show booth-booth-ip", [
-+             " Resource: booth-booth-ip (class=ocf provider=heartbeat type=IPaddr2)",
-+             "  Attributes: ip=192.168.122.120",
-+             "  Operations: start interval=0s timeout=20s (booth-booth-ip-start-interval-0s)",
-+             "              stop interval=0s timeout=20s (booth-booth-ip-stop-interval-0s)",
-+             "              monitor interval=10s timeout=20s (booth-booth-ip-monitor-interval-10s)",
-+        ])
-+
-+    def test_refuse_create_booth_when_config_is_already_in_use(self):
-+        self.assert_pcs_success("booth create ip 192.168.122.120")
-+        self.assert_pcs_fail("booth create ip 192.168.122.121", [
-+            "Error: booth instance 'booth' is already created as cluster"
-+                " resource"
-+        ])
-+
-+@need_booth_resource_agent
-+class RemoveTest(BoothTest):
-+    def test_failed_when_no_booth_configuration_created(self):
-+        self.assert_pcs_success("resource show", "NO resources configured\n")
-+        self.assert_pcs_fail("booth remove", [
-+            "Error: booth instance 'booth' not found in cib"
-+        ])
-+
-+    def test_failed_when_multiple_booth_configuration_created(self):
-+        self.assert_pcs_success("resource show", "NO resources configured\n")
-+        self.assert_pcs_success("booth create ip 192.168.122.120")
-+        self.assert_pcs_success(
-+            "resource create some-id ocf:pacemaker:booth-site"
-+            " config=/etc/booth/booth.conf"
-+        )
-+        self.assert_pcs_success("resource show", [
-+             " Resource Group: booth-booth-group",
-+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
-+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
-+             " some-id	(ocf::pacemaker:booth-site):	Stopped",
-+        ])
-+        self.assert_pcs_fail("booth remove", [
-+            "Error: found more than one booth instance 'booth' in cib, use"
-+            " --force to override"
-+        ])
-+
-+
-+    def test_remove_added_booth_configuration(self):
-+        self.assert_pcs_success("resource show", "NO resources configured\n")
-+        self.assert_pcs_success("booth create ip 192.168.122.120")
-+        self.assert_pcs_success("resource show", [
-+             " Resource Group: booth-booth-group",
-+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
-+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
-+        ])
-+        self.assert_pcs_success("booth remove", [
-+            "Deleting Resource - booth-booth-ip",
-+            "Deleting Resource (and group) - booth-booth-service",
-+        ])
-+        self.assert_pcs_success("resource show", "NO resources configured\n")
-+
-+    def test_fail_when_booth_is_not_currently_configured(self):
-+        pass
-+
-+class TicketGrantTest(BoothTest):
-+    def test_failed_when_implicit_site_but_not_correct_confgiuration_in_cib(
-+        self
-+    ):
-+        self.assert_pcs_success("booth ticket add T1")
-+        #no resource in cib
-+        self.assert_pcs_fail("booth ticket grant T1", [
-+            "Error: cannot determine local site ip, please specify site"
-+                " parameter"
-+            ,
-+        ])
-+
-+class TicketRevokeTest(BoothTest):
-+    def test_failed_when_implicit_site_but_not_correct_confgiuration_in_cib(
-+        self
-+    ):
-+        self.assert_pcs_success("booth ticket add T1")
-+        #no resource in cib
-+        self.assert_pcs_fail("booth ticket revoke T1", [
-+            "Error: cannot determine local site ip, please specify site"
-+                " parameter"
-+            ,
-+        ])
-+
-+class ConfigTest(unittest.TestCase, BoothMixin):
-+    def setUp(self):
-+        shutil.copy(EMPTY_CIB, TEMP_CIB)
-+        self.pcs_runner = PcsRunner(TEMP_CIB)
-+    def test_fail_when_config_file_do_not_exists(self):
-+        ensure_booth_config_not_exists()
-+        self.assert_pcs_fail(
-+            "booth config",
-+            "Error: Booth config file '{0}' does not exist\n".format(
-+                BOOTH_CONFIG_FILE
-+            )
-+        )
-diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
-index e1f2313..10f8a96 100644
---- a/pcs/test/test_lib_cib_tools.py
-+++ b/pcs/test/test_lib_cib_tools.py
-@@ -136,6 +136,27 @@ class GetConstraintsTest(CibToolsTest):
-             ),
-         )
- 
-+class GetResourcesTest(CibToolsTest):
-+    def test_success_if_exists(self):
-+        self.assertEqual(
-+            "resources",
-+            lib.get_resources(self.cib.tree).tag
-+        )
-+
-+    def test_raise_if_missing(self):
-+        for section in self.cib.tree.findall(".//configuration/resources"):
-+            section.getparent().remove(section)
-+        assert_raise_library_error(
-+            lambda: lib.get_resources(self.cib.tree),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
-+                {
-+                    "section": "configuration/resources",
-+                }
-+            ),
-+        )
-+
- 
- class GetAclsTest(CibToolsTest):
-     def setUp(self):
-diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
-index 929a50d..a4ec0f9 100644
---- a/pcs/test/test_lib_external.py
-+++ b/pcs/test/test_lib_external.py
-@@ -1068,6 +1068,25 @@ class DisableServiceTest(TestCase):
-         lib.disable_service(self.mock_runner, self.service)
-         self.assertEqual(self.mock_runner.run.call_count, 0)
- 
-+    def test_instance_systemctl(self, mock_systemctl):
-+        mock_systemctl.return_value = True
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.disable_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with([
-+            "systemctl",
-+            "disable",
-+            "{0}@{1}.service".format(self.service, "test")
-+        ])
-+
-+    @mock.patch("pcs.lib.external.is_service_installed")
-+    def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl):
-+        mock_is_installed.return_value = True
-+        mock_systemctl.return_value = False
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.disable_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with(
-+            ["chkconfig", self.service, "off"]
-+        )
- 
- @mock.patch("pcs.lib.external.is_systemctl")
- class EnableServiceTest(TestCase):
-@@ -1113,6 +1132,24 @@ class EnableServiceTest(TestCase):
-             ["chkconfig", self.service, "on"]
-         )
- 
-+    def test_instance_systemctl(self, mock_systemctl):
-+        mock_systemctl.return_value = True
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.enable_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with([
-+            "systemctl",
-+            "enable",
-+            "{0}@{1}.service".format(self.service, "test")
-+        ])
-+
-+    def test_instance_not_systemctl(self, mock_systemctl):
-+        mock_systemctl.return_value = False
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.enable_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with(
-+            ["chkconfig", self.service, "on"]
-+        )
-+
- 
- @mock.patch("pcs.lib.external.is_systemctl")
- class StartServiceTest(TestCase):
-@@ -1158,6 +1195,22 @@ class StartServiceTest(TestCase):
-             ["service", self.service, "start"]
-         )
- 
-+    def test_instance_systemctl(self, mock_systemctl):
-+        mock_systemctl.return_value = True
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.start_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with([
-+            "systemctl", "start", "{0}@{1}.service".format(self.service, "test")
-+        ])
-+
-+    def test_instance_not_systemctl(self, mock_systemctl):
-+        mock_systemctl.return_value = False
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.start_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with(
-+            ["service", self.service, "start"]
-+        )
-+
- 
- @mock.patch("pcs.lib.external.is_systemctl")
- class StopServiceTest(TestCase):
-@@ -1203,6 +1256,22 @@ class StopServiceTest(TestCase):
-             ["service", self.service, "stop"]
-         )
- 
-+    def test_instance_systemctl(self, mock_systemctl):
-+        mock_systemctl.return_value = True
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.stop_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with([
-+            "systemctl", "stop", "{0}@{1}.service".format(self.service, "test")
-+        ])
-+
-+    def test_instance_not_systemctl(self, mock_systemctl):
-+        mock_systemctl.return_value = False
-+        self.mock_runner.run.return_value = ("", 0)
-+        lib.stop_service(self.mock_runner, self.service, instance="test")
-+        self.mock_runner.run.assert_called_once_with(
-+            ["service", self.service, "stop"]
-+        )
-+
- 
- class KillServicesTest(TestCase):
-     def setUp(self):
-@@ -1470,3 +1539,20 @@ pacemaker      	0:off	1:off	2:off	3:off	4:off	5:off	6:off
-         self.assertEqual(lib.get_non_systemd_services(self.mock_runner), [])
-         self.assertEqual(mock_is_systemctl.call_count, 1)
-         self.assertEqual(self.mock_runner.call_count, 0)
-+
-+@mock.patch("pcs.lib.external.is_systemctl")
-+class EnsureIsSystemctlTest(TestCase):
-+    def test_systemd(self, mock_is_systemctl):
-+        mock_is_systemctl.return_value = True
-+        lib.ensure_is_systemd()
-+
-+    def test_not_systemd(self, mock_is_systemctl):
-+        mock_is_systemctl.return_value = False
-+        assert_raise_library_error(
-+            lib.ensure_is_systemd,
-+            (
-+                severity.ERROR,
-+                report_codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS,
-+                {}
-+            )
-+        )
-diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py
-index 78a0787..b8383f6 100644
---- a/pcs/test/tools/color_text_runner.py
-+++ b/pcs/test/tools/color_text_runner.py
-@@ -5,12 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--import sys
--major, minor = sys.version_info[:2]
--if major == 2 and minor == 6:
--    import unittest2 as unittest
--else:
--    import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- 
- palete = {
-@@ -37,7 +32,7 @@ palete = {
- def apply(key_list, text):
-     return("".join([palete[key] for key in key_list]) + text + palete["end"])
- 
--TextTestResult = unittest.runner.TextTestResult
-+TextTestResult = unittest.TextTestResult
- #pylint: disable=bad-super-call
- class ColorTextTestResult(TextTestResult):
-     def addSuccess(self, test):
-diff --git a/pcs/test/tools/pcs_unittest.py b/pcs/test/tools/pcs_unittest.py
-new file mode 100644
-index 0000000..4a3205d
---- /dev/null
-+++ b/pcs/test/tools/pcs_unittest.py
-@@ -0,0 +1,7 @@
-+import sys
-+major, minor = sys.version_info[:2]
-+if major == 2 and minor == 6:
-+    from unittest2 import *
-+else:
-+    from unittest import *
-+del major, minor, sys
-diff --git a/pcs/usage.py b/pcs/usage.py
-index ef60b64..baa70d0 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -21,6 +21,7 @@ def full_usage():
-     out += strip_extras(acl([],False))
-     out += strip_extras(qdevice([],False))
-     out += strip_extras(quorum([],False))
-+    out += strip_extras(booth([],False))
-     out += strip_extras(status([],False))
-     out += strip_extras(config([],False))
-     out += strip_extras(pcsd([],False))
-@@ -167,6 +168,7 @@ Commands:
-     acl         Set pacemaker access control lists.
-     qdevice     Manage quorum device provider.
-     quorum      Manage cluster quorum settings.
-+    booth       Manage booth (cluster ticket manager).
-     status      View cluster status.
-     config      View and manage cluster configuration.
-     pcsd        Manage pcs daemon.
-@@ -1407,6 +1409,75 @@ Commands:
-     else:
-         return output
- 
-+def booth(args=[], pout=True):
-+    output = """
-+Usage: pcs booth <command>
-+Manage booth (cluster ticket manager)
-+
-+Commands:
-+    setup sites <address> <address> [<address>...] [arbitrators <address> ...]
-+            [--force]
-+        Write new booth configuration with specified sites and arbitrators.
-+        Total number of peers (sites and arbitrators) must be odd.  When
-+        the configuration file already exists, command fails unless --force
-+        is specified.
-+
-+    destroy
-+        Remove booth configuration files.
-+
-+    ticket add <ticket>
-+        Add new ticket to the current configuration.
-+
-+    ticket remove <ticket>
-+        Remove the specified ticket from the current configuration.
-+
-+    config
-+        Show booth configuration.
-+
-+    create ip <address>
-+        Make the cluster run booth service on the specified ip address as
-+        a cluster resource.  Typically this is used to run booth site.
-+
-+    remove
-+        Remove booth resources created by the "pcs booth create" command.
-+
-+    ticket grant <ticket> [<site address>]
-+        Grant the ticket for the site specified by address.  Site address which
-+        has been specified with 'pcs booth create' command is used if
-+        'site address' is omitted.
-+
-+    ticket revoke <ticket> [<site address>]
-+        Revoke the ticket for the site specified by address.  Site address which
-+        has been specified with 'pcs booth create' command is used if
-+        'site address' is omitted.
-+
-+    status
-+        Print current status of booth on the local node.
-+
-+    pull <node>
-+        Pull booth configuration from the specified node.
-+
-+    sync [--skip-offline]
-+        Send booth configuration from the local node to all nodes
-+        in the cluster.
-+
-+    enable
-+        Enable booth arbitrator service.
-+
-+    disable
-+        Disable booth arbitrator service.
-+
-+    start
-+        Start booth arbitrator service.
-+
-+    stop
-+        Stop booth arbitrator service.
-+"""
-+    if pout:
-+        print(sub_usage(args, output))
-+    else:
-+        return output
-+
- 
- def alert(args=[], pout=True):
-     output = """
-@@ -1460,6 +1531,7 @@ def show(main_usage_name, rest_usage_names):
-         "property": property,
-         "qdevice": qdevice,
-         "quorum": quorum,
-+        "booth": booth,
-         "resource": resource,
-         "status": status,
-         "stonith": stonith,
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 25274dc..8b2cf7c 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -25,35 +25,6 @@ import base64
- import threading
- import logging
- 
--try:
--    # python2
--    from urllib import urlencode as urllib_urlencode
--except ImportError:
--    # python3
--    from urllib.parse import urlencode as urllib_urlencode
--try:
--    # python2
--    from urllib2 import (
--        build_opener as urllib_build_opener,
--        install_opener as urllib_install_opener,
--        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
--        HTTPSHandler as urllib_HTTPSHandler,
--        HTTPError as urllib_HTTPError,
--        URLError as urllib_URLError
--    )
--except ImportError:
--    # python3
--    from urllib.request import (
--        build_opener as urllib_build_opener,
--        install_opener as urllib_install_opener,
--        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
--        HTTPSHandler as urllib_HTTPSHandler
--    )
--    from urllib.error import (
--        HTTPError as urllib_HTTPError,
--        URLError as urllib_URLError
--    )
--
- 
- from pcs import settings, usage
- from pcs.cli.common.reports import (
-@@ -89,6 +60,40 @@ from pcs.lib.pacemaker_values import(
- from pcs.cli.common import middleware
- from pcs.cli.common.env import Env
- from pcs.cli.common.lib_wrapper import Library
-+from pcs.cli.booth.command import DEFAULT_BOOTH_NAME
-+import pcs.cli.booth.env
-+
-+
-+try:
-+    # python2
-+    from urllib import urlencode as urllib_urlencode
-+except ImportError:
-+    # python3
-+    from urllib.parse import urlencode as urllib_urlencode
-+try:
-+    # python2
-+    from urllib2 import (
-+        build_opener as urllib_build_opener,
-+        install_opener as urllib_install_opener,
-+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
-+        HTTPSHandler as urllib_HTTPSHandler,
-+        HTTPError as urllib_HTTPError,
-+        URLError as urllib_URLError
-+    )
-+except ImportError:
-+    # python3
-+    from urllib.request import (
-+        build_opener as urllib_build_opener,
-+        install_opener as urllib_install_opener,
-+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
-+        HTTPSHandler as urllib_HTTPSHandler
-+    )
-+    from urllib.error import (
-+        HTTPError as urllib_HTTPError,
-+        URLError as urllib_URLError
-+    )
-+
-+
- 
- 
- PYTHON2 = sys.version[0] == "2"
-@@ -2691,6 +2696,11 @@ def get_middleware_factory():
-         cib=middleware.cib(usefile, get_cib, replace_cib_configuration),
-         corosync_conf_existing=middleware.corosync_conf_existing(
-             pcs_options.get("--corosync_conf", None)
-+        ),
-+        booth_conf=pcs.cli.booth.env.middleware_config(
-+            pcs_options.get("--name", DEFAULT_BOOTH_NAME),
-+            pcs_options.get("--booth-conf", None),
-+            pcs_options.get("--booth-key", None),
-         )
-     )
- 
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 553a20c..d46cd62 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -8,6 +8,7 @@ require 'net/https'
- require 'json'
- require 'fileutils'
- require 'backports'
-+require 'base64'
- 
- require 'config.rb'
- require 'cfgsync.rb'
-@@ -19,6 +20,9 @@ require 'auth.rb'
- class NotImplementedException < NotImplementedError
- end
- 
-+class InvalidFileNameException < NameError
-+end
-+
- def getAllSettings(auth_user, cib_dom=nil)
-   unless cib_dom
-     cib_dom = get_cib_dom(auth_user)
-@@ -1357,10 +1361,10 @@ def pcsd_restart_nodes(auth_user, nodes)
-   }
- end
- 
--def write_file_lock(path, perm, data)
-+def write_file_lock(path, perm, data, binary=false)
-+  file = nil
-   begin
--    file = nil
--    file = File.open(path, 'w', perm)
-+    file = File.open(path, binary ? 'wb' : 'w', perm)
-     file.flock(File::LOCK_EX)
-     file.write(data)
-   rescue => e
-@@ -1374,6 +1378,23 @@ def write_file_lock(path, perm, data)
-   end
- end
- 
-+def read_file_lock(path, binary=false)
-+  file = nil
-+  begin
-+    file = File.open(path, binary ? 'rb' : 'r')
-+    file.flock(File::LOCK_SH)
-+    return file.read()
-+  rescue => e
-+    $logger.error("Cannot read file '#{path}': #{e.message}")
-+    raise
-+  ensure
-+    unless file.nil?
-+      file.flock(File::LOCK_UN)
-+      file.close()
-+    end
-+  end
-+end
-+
- def verify_cert_key_pair(cert, key)
-   errors = []
-   cert_modulus = nil
-@@ -2028,3 +2049,52 @@ def get_parsed_local_sbd_config()
-     return nil
-   end
- end
-+
-+def write_booth_config(config, data)
-+  if config.include?('/')
-+    raise InvalidFileNameException.new(config)
-+  end
-+  write_file_lock(File.join(BOOTH_CONFIG_DIR, config), nil, data)
-+end
-+
-+def read_booth_config(config)
-+  if config.include?('/')
-+    raise InvalidFileNameException.new(config)
-+  end
-+  config_path = File.join(BOOTH_CONFIG_DIR, config)
-+  unless File.file?(config_path)
-+    return nil
-+  end
-+  return read_file_lock(config_path)
-+end
-+
-+def write_booth_authfile(filename, data)
-+  if filename.include?('/')
-+    raise InvalidFileNameException.new(filename)
-+  end
-+  write_file_lock(
-+    File.join(BOOTH_CONFIG_DIR, filename), 0600, Base64.decode64(data), true
-+  )
-+end
-+
-+def read_booth_authfile(filename)
-+  if filename.include?('/')
-+    raise InvalidFileNameException.new(filename)
-+  end
-+  return Base64.strict_encode64(
-+    read_file_lock(File.join(BOOTH_CONFIG_DIR, filename), true)
-+  )
-+end
-+
-+def get_authfile_from_booth_config(config_data)
-+  authfile_path = nil
-+  config_data.split("\n").each {|line|
-+    if line.include?('=')
-+      parts = line.split('=', 2)
-+      if parts[0].strip == 'authfile'
-+        authfile_path = parts[1].strip
-+      end
-+    end
-+  }
-+  return authfile_path
-+end
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index ebf425c..134ac5d 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -83,6 +83,10 @@ def remote(params, request, auth_user)
-       :qdevice_client_disable => method(:qdevice_client_disable),
-       :qdevice_client_start => method(:qdevice_client_start),
-       :qdevice_client_stop => method(:qdevice_client_stop),
-+      :booth_set_config => method(:booth_set_config),
-+      :booth_save_files => method(:booth_save_files),
-+      :booth_get_config => method(:booth_get_config),
-+
-   }
-   remote_cmd_with_pacemaker = {
-       :pacemaker_node_status => method(:remote_pacemaker_node_status),
-@@ -2677,3 +2681,143 @@ def unmanage_resource(param, request, auth_user)
-     return [400, 'Invalid input data format']
-   end
- end
-+
-+def booth_set_config(params, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  begin
-+    unless params[:data_json]
-+      return [400, "Missing required parameter 'data_json'"]
-+    end
-+    data = JSON.parse(params[:data_json], {:symbolize_names => true})
-+  rescue JSON::ParserError
-+    return [400, 'Invalid input data format']
-+  end
-+  config = data[:config]
-+  authfile = data[:authfile]
-+  return [400, 'Invalid input data format'] unless (
-+    config and config[:name] and config[:data]
-+  )
-+  return [400, 'Invalid input data format'] if (
-+    authfile and (not authfile[:name] or not authfile[:data])
-+  )
-+  begin
-+    write_booth_config(config[:name], config[:data])
-+    if authfile
-+      write_booth_authfile(authfile[:name], authfile[:data])
-+    end
-+  rescue InvalidFileNameException => e
-+    return [400, "Invalid format of config/key file name '#{e.message}'"]
-+  rescue => e
-+    msg = "Unable to save booth configuration: #{e.message}"
-+    $logger.error(msg)
-+    return [400, msg]
-+  end
-+  msg = 'Booth configuration saved.'
-+  $logger.info(msg)
-+  return [200, msg]
-+end
-+
-+def booth_save_files(params, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
-+    return 403, 'Permission denied'
-+  end
-+  begin
-+    data = JSON.parse(params[:data_json], {:symbolize_names => true})
-+    data.each { |file|
-+      unless file[:name] and file[:data]
-+        return [400, 'Invalid input data format']
-+      end
-+      if file[:name].include?('/')
-+        return [400, "Invalid file name format '#{file[:name]}'"]
-+      end
-+    }
-+  rescue JSON::ParserError, NoMethodError
-+    return [400, 'Invalid input data format']
-+  end
-+  rewrite_existing = (
-+  params.include?('rewrite_existing') || params.include?(:rewrite_existing)
-+  )
-+
-+  conflict_files = []
-+  data.each { |file|
-+    next unless File.file?(File.join(BOOTH_CONFIG_DIR, file[:name]))
-+    if file[:is_authfile]
-+      cur_data = read_booth_authfile(file[:name])
-+    else
-+      cur_data = read_booth_config(file[:name])
-+    end
-+    if cur_data != file[:data]
-+      conflict_files << file[:name]
-+    end
-+  }
-+
-+  write_failed = {}
-+  saved_files = []
-+  data.each { |file|
-+    next if conflict_files.include?(file[:name]) and not rewrite_existing
-+    begin
-+      if file[:is_authfile]
-+        write_booth_authfile(file[:name], file[:data])
-+      else
-+        write_booth_config(file[:name], file[:data])
-+      end
-+      saved_files << file[:name]
-+    rescue => e
-+      msg = "Unable to save file (#{file[:name]}): #{e.message}"
-+      $logger.error(msg)
-+      write_failed[file[:name]] = e
-+    end
-+  }
-+  return [200, JSON.generate({
-+    :existing => conflict_files,
-+    :saved => saved_files,
-+    :failed => write_failed
-+  })]
-+end
-+
-+def booth_get_config(params, request, auth_user)
-+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
-+    return 403, 'Permission denied'
-+  end
-+  name = params[:name]
-+  if name
-+    config_file_name = "#{name}.conf"
-+  else
-+    config_file_name = 'booth.conf'
-+  end
-+  if config_file_name.include?('/')
-+    return [400, 'Invalid name of booth configuration']
-+  end
-+  begin
-+    config_data = read_booth_config(config_file_name)
-+    unless config_data
-+      return [400, "Config doesn't exist"]
-+    end
-+    authfile_name = nil
-+    authfile_data = nil
-+    authfile_path = get_authfile_from_booth_config(config_data)
-+    if authfile_path
-+      if File.dirname(authfile_path) != BOOTH_CONFIG_DIR
-+        return [
-+          400, "Authfile of specified config is not in '#{BOOTH_CONFIG_DIR}'"
-+        ]
-+      end
-+      authfile_name = File.basename(authfile_path)
-+      authfile_data = read_booth_authfile(authfile_name)
-+    end
-+    return [200, JSON.generate({
-+      :config => {
-+        :name => config_file_name,
-+        :data => config_data
-+      },
-+      :authfile => {
-+        :name => authfile_name,
-+        :data => authfile_data
-+      }
-+    })]
-+  rescue => e
-+    return [400, "Unable to read booth config/key file: #{e.message}"]
-+  end
-+end
-diff --git a/pcsd/settings.rb b/pcsd/settings.rb
-index 51f00ac..e702585 100644
---- a/pcsd/settings.rb
-+++ b/pcsd/settings.rb
-@@ -20,6 +20,7 @@ PACEMAKERD = "/usr/sbin/pacemakerd"
- CIBADMIN = "/usr/sbin/cibadmin"
- SBD_CONFIG = '/etc/sysconfig/sbd'
- CIB_PATH='/var/lib/pacemaker/cib/cib.xml'
-+BOOTH_CONFIG_DIR='/etc/booth'
- 
- COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
- COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1308514-02-booth-support-improvements.patch b/SOURCES/bz1308514-02-booth-support-improvements.patch
deleted file mode 100644
index 9cb5f3c..0000000
--- a/SOURCES/bz1308514-02-booth-support-improvements.patch
+++ /dev/null
@@ -1,1904 +0,0 @@
-From 798a8ab276fb816c3d9cfa5ba0a8ed55a3ed6cd2 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Mon, 29 Aug 2016 15:14:25 +0200
-Subject: [PATCH] squash bz1308514 Wider support for booth configura
-
-50fb38db5e26 append a new line at the end of the booth config
-
-a03b98c0f9e1 add bash completion for booth
-
-52b97fa9ef32 clean up ip resource if creating booth res. fails
-
-3d0e698a83fc fix allow force remove multiple booth resources
-
-1ac88efab2cd refactor booth remove
-
-6b41c5cc1661 add booth restart command
-
-706c6f32f172 fix usage: ticket grant/revoke not for arbitrator
-
-26fa2a241227 complete man (stayed behind usage)
-
-75f8da852641 modify exchange format of booth config
-
-ffe6ec7ea8d2 show all booth config lines including unsupported
-
-8722fb7ede2e add support for options during add booth ticket
-
-50eb49a4350b fix naming of booth reportes and report codes according to convetions
-
-6dfb7c82d802 simplify booth config distribution reports
-
-116a7a311cd7 fix adding node to cluster when booth is not installed
-
-23abb122e2d1 fix getting a list of existing booth config files
-
-ebd9fc496e24 display booth config as it is (plain, not parsed)
-
-0183814dd57a add ability to display booth config from a remote node
----
- pcs/booth.py                                |   6 +-
- pcs/cli/booth/command.py                    |  72 ++++++++++--------
- pcs/cli/booth/test/test_command.py          |  21 ++++--
- pcs/cli/common/lib_wrapper.py               |   3 +-
- pcs/common/report_codes.py                  |   9 +--
- pcs/lib/booth/config_exchange.py            |  47 ++++--------
- pcs/lib/booth/config_files.py               |  15 +++-
- pcs/lib/booth/config_parser.py              |   3 +-
- pcs/lib/booth/config_structure.py           |  35 ++++++++-
- pcs/lib/booth/reports.py                    |  97 +++++++++++++-----------
- pcs/lib/booth/resource.py                   |  49 ++++--------
- pcs/lib/booth/sync.py                       |  12 +--
- pcs/lib/booth/test/test_config_exchange.py  |  56 ++++++--------
- pcs/lib/booth/test/test_config_files.py     |  32 ++++++--
- pcs/lib/booth/test/test_config_parser.py    |   2 +
- pcs/lib/booth/test/test_config_structure.py |  66 ++++++++++++++++-
- pcs/lib/booth/test/test_resource.py         | 111 ++++++++++++----------------
- pcs/lib/booth/test/test_sync.py             |  56 +++++++-------
- pcs/lib/commands/booth.py                   |  88 ++++++++++++++--------
- pcs/lib/commands/test/test_booth.py         |  50 +++++++------
- pcs/pcs.8                                   |  11 ++-
- pcs/test/test_booth.py                      |  77 +++++++++++++++++--
- pcs/usage.py                                |  13 +++-
- 23 files changed, 564 insertions(+), 367 deletions(-)
-
-diff --git a/pcs/booth.py b/pcs/booth.py
-index 764dcd8..5ec41bf 100644
---- a/pcs/booth.py
-+++ b/pcs/booth.py
-@@ -12,7 +12,7 @@ from pcs import utils
- from pcs.cli.booth import command
- from pcs.cli.common.errors import CmdLineInputError
- from pcs.lib.errors import LibraryError
--from pcs.resource import resource_create, resource_remove
-+from pcs.resource import resource_create, resource_remove, resource_restart
- 
- 
- def booth_cmd(lib, argv, modifiers):
-@@ -47,13 +47,15 @@ def booth_cmd(lib, argv, modifiers):
-             else:
-                 raise CmdLineInputError()
-         elif sub_cmd == "create":
--            command.get_create_in_cluster(resource_create)(
-+            command.get_create_in_cluster(resource_create, resource_remove)(
-                 lib, argv_next, modifiers
-             )
-         elif sub_cmd == "remove":
-             command.get_remove_from_cluster(resource_remove)(
-                 lib, argv_next, modifiers
-             )
-+        elif sub_cmd == "restart":
-+            command.get_restart(resource_restart)(lib, argv_next, modifiers)
-         elif sub_cmd == "sync":
-             command.sync(lib, argv_next, modifiers)
-         elif sub_cmd == "pull":
-diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py
-index bea6582..0b71a01 100644
---- a/pcs/cli/booth/command.py
-+++ b/pcs/cli/booth/command.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- from pcs.cli.common.errors import CmdLineInputError
--from pcs.cli.common.parse_args import group_by_keywords
-+from pcs.cli.common.parse_args import group_by_keywords, prepare_options
- 
- 
- DEFAULT_BOOTH_NAME = "booth"
-@@ -18,15 +18,25 @@ def config_setup(lib, arg_list, modifiers):
-     """
-     create booth config
-     """
--    booth_configuration = group_by_keywords(
-+    peers = group_by_keywords(
-         arg_list,
-         set(["sites", "arbitrators"]),
-         keyword_repeat_allowed=False
-     )
--    if "sites" not in booth_configuration or not booth_configuration["sites"]:
-+    if "sites" not in peers or not peers["sites"]:
-         raise CmdLineInputError()
- 
--    lib.booth.config_setup(booth_configuration, modifiers["force"])
-+    booth_config = []
-+    for site in peers["sites"]:
-+        booth_config.append({"key": "site", "value": site, "details": []})
-+    for arbitrator in peers["arbitrators"]:
-+        booth_config.append({
-+            "key": "arbitrator",
-+            "value": arbitrator,
-+            "details": [],
-+        })
-+
-+    lib.booth.config_setup(booth_config, modifiers["force"])
- 
- def config_destroy(lib, arg_list, modifiers):
-     """
-@@ -41,36 +51,20 @@ def config_show(lib, arg_list, modifiers):
-     """
-     print booth config
-     """
--    booth_configuration = lib.booth.config_show()
--    authfile_lines = []
--    if booth_configuration["authfile"]:
--        authfile_lines.append(
--            "authfile = {0}".format(booth_configuration["authfile"])
--        )
-+    if len(arg_list) > 1:
-+        raise CmdLineInputError()
-+    node = None if not arg_list else arg_list[0]
-+
-+    print(lib.booth.config_text(DEFAULT_BOOTH_NAME, node), end="")
- 
--    line_list = (
--        ["site = {0}".format(site) for site in booth_configuration["sites"]]
--        +
--        [
--            "arbitrator = {0}".format(arbitrator)
--            for arbitrator in booth_configuration["arbitrators"]
--        ]
--        + authfile_lines +
--        [
--            'ticket = "{0}"'.format(ticket)
--            for ticket in booth_configuration["tickets"]
--        ]
--    )
--    for line in line_list:
--        print(line)
- 
- def config_ticket_add(lib, arg_list, modifiers):
-     """
-     add ticket to current configuration
-     """
--    if len(arg_list) != 1:
-+    if not arg_list:
-         raise CmdLineInputError
--    lib.booth.config_ticket_add(arg_list[0])
-+    lib.booth.config_ticket_add(arg_list[0], prepare_options(arg_list[1:]))
- 
- def config_ticket_remove(lib, arg_list, modifiers):
-     """
-@@ -96,7 +90,7 @@ def ticket_revoke(lib, arg_list, modifiers):
- def ticket_grant(lib, arg_list, modifiers):
-     ticket_operation(lib.booth.ticket_grant, arg_list, modifiers)
- 
--def get_create_in_cluster(resource_create):
-+def get_create_in_cluster(resource_create, resource_remove):
-     #TODO resource_remove is provisional hack until resources are not moved to
-     #lib
-     def create_in_cluster(lib, arg_list, modifiers):
-@@ -108,6 +102,7 @@ def get_create_in_cluster(resource_create):
-             __get_name(modifiers),
-             ip,
-             resource_create,
-+            resource_remove,
-         )
-     return create_in_cluster
- 
-@@ -118,10 +113,28 @@ def get_remove_from_cluster(resource_remove):
-         if arg_list:
-             raise CmdLineInputError()
- 
--        lib.booth.remove_from_cluster(__get_name(modifiers), resource_remove)
-+        lib.booth.remove_from_cluster(
-+            __get_name(modifiers),
-+            resource_remove,
-+            modifiers["force"],
-+        )
- 
-     return remove_from_cluster
- 
-+def get_restart(resource_restart):
-+    #TODO resource_restart is provisional hack until resources are not moved to
-+    #lib
-+    def restart(lib, arg_list, modifiers):
-+        if arg_list:
-+            raise CmdLineInputError()
-+
-+        lib.booth.restart(
-+            __get_name(modifiers),
-+            resource_restart,
-+            modifiers["force"],
-+        )
-+
-+    return restart
- 
- def sync(lib, arg_list, modifiers):
-     if arg_list:
-@@ -175,3 +188,4 @@ def status(lib, arg_list, modifiers):
-     if booth_status.get("status"):
-         print("DAEMON STATUS:")
-         print(booth_status["status"])
-+
-diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py
-index 00216f2..019a74f 100644
---- a/pcs/cli/booth/test/test_command.py
-+++ b/pcs/cli/booth/test/test_command.py
-@@ -28,10 +28,12 @@ class ConfigSetupTest(TestCase):
-             }
-         )
-         lib.booth.config_setup.assert_called_once_with(
--            {
--                "sites": ["1.1.1.1", "2.2.2.2", "4.4.4.4"],
--                "arbitrators": ["3.3.3.3"],
--            },
-+            [
-+                {"key": "site", "value": "1.1.1.1", "details": []},
-+                {"key": "site", "value": "2.2.2.2", "details": []},
-+                {"key": "site", "value": "4.4.4.4", "details": []},
-+                {"key": "arbitrator", "value": "3.3.3.3", "details": []},
-+            ],
-             False
-         )
- 
-@@ -40,5 +42,12 @@ class ConfigTicketAddTest(TestCase):
-         lib = mock.MagicMock()
-         lib.booth = mock.MagicMock()
-         lib.booth.config_ticket_add = mock.MagicMock()
--        command.config_ticket_add(lib, arg_list=["TICKET_A"], modifiers={})
--        lib.booth.config_ticket_add.assert_called_once_with("TICKET_A")
-+        command.config_ticket_add(
-+            lib,
-+            arg_list=["TICKET_A", "timeout=10"],
-+            modifiers={}
-+        )
-+        lib.booth.config_ticket_add.assert_called_once_with(
-+            "TICKET_A",
-+            {"timeout": "10"},
-+        )
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index c836575..94a1311 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -209,11 +209,12 @@ def load_module(env, middleware_factory, name):
-             {
-                 "config_setup": booth.config_setup,
-                 "config_destroy": booth.config_destroy,
--                "config_show": booth.config_show,
-+                "config_text": booth.config_text,
-                 "config_ticket_add": booth.config_ticket_add,
-                 "config_ticket_remove": booth.config_ticket_remove,
-                 "create_in_cluster": booth.create_in_cluster,
-                 "remove_from_cluster": booth.remove_from_cluster,
-+                "restart": booth.restart,
-                 "config_sync": booth.config_sync,
-                 "enable": booth.enable_booth,
-                 "disable": booth.disable_booth,
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index 672c2e3..5e46a1f 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -29,16 +29,15 @@ BOOTH_ADDRESS_DUPLICATION = "BOOTH_ADDRESS_DUPLICATION"
- BOOTH_ALREADY_IN_CIB = "BOOTH_ALREADY_IN_CIB"
- BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP = "BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP"
- BOOTH_CANNOT_IDENTIFY_KEYFILE = "BOOTH_CANNOT_IDENTIFY_KEYFILE"
-+BOOTH_CONFIG_ACCEPTED_BY_NODE = "BOOTH_CONFIG_ACCEPTED_BY_NODE"
-+BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR = "BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR"
-+BOOTH_CONFIG_DISTRIBUTION_STARTED = "BOOTH_CONFIG_DISTRIBUTION_STARTED"
- BOOTH_CONFIG_FILE_ALREADY_EXISTS = "BOOTH_CONFIG_FILE_ALREADY_EXISTS"
- BOOTH_CONFIG_IO_ERROR = "BOOTH_CONFIG_IO_ERROR"
- BOOTH_CONFIG_IS_USED = "BOOTH_CONFIG_IS_USED"
- BOOTH_CONFIG_READ_ERROR = "BOOTH_CONFIG_READ_ERROR"
--BOOTH_CONFIG_WRITE_ERROR = "BOOTH_CONFIG_WRITE_ERROR"
- BOOTH_CONFIG_UNEXPECTED_LINES = "BOOTH_CONFIG_UNEXPECTED_LINES"
--BOOTH_CONFIGS_SAVED_ON_NODE = "BOOTH_CONFIGS_SAVED_ON_NODE"
--BOOTH_CONFIGS_SAVING_ON_NODE = "BOOTH_CONFIGS_SAVING_ON_NODE"
- BOOTH_DAEMON_STATUS_ERROR = "BOOTH_DAEMON_STATUS_ERROR"
--BOOTH_DISTRIBUTING_CONFIG = "BOOTH_DISTRIBUTING_CONFIG"
- BOOTH_EVEN_PEERS_NUM = "BOOTH_EVEN_PEERS_NUM"
- BOOTH_FETCHING_CONFIG_FROM_NODE = "BOOTH_FETCHING_CONFIG_FROM_NODE"
- BOOTH_INVALID_CONFIG_NAME = "BOOTH_INVALID_CONFIG_NAME"
-@@ -50,8 +49,8 @@ BOOTH_PEERS_STATUS_ERROR = "BOOTH_PEERS_STATUS_ERROR"
- BOOTH_SKIPPING_CONFIG = "BOOTH_SKIPPING_CONFIG"
- BOOTH_TICKET_DOES_NOT_EXIST = "BOOTH_TICKET_DOES_NOT_EXIST"
- BOOTH_TICKET_DUPLICATE = "BOOTH_TICKET_DUPLICATE"
--BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED"
- BOOTH_TICKET_NAME_INVALID = "BOOTH_TICKET_NAME_INVALID"
-+BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED"
- BOOTH_TICKET_STATUS_ERROR = "BOOTH_TICKET_STATUS_ERROR"
- BOOTH_UNSUPORTED_FILE_LOCATION = "BOOTH_UNSUPORTED_FILE_LOCATION"
- CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
-diff --git a/pcs/lib/booth/config_exchange.py b/pcs/lib/booth/config_exchange.py
-index e0569ba..377af1d 100644
---- a/pcs/lib/booth/config_exchange.py
-+++ b/pcs/lib/booth/config_exchange.py
-@@ -6,38 +6,23 @@ from __future__ import (
- )
- from pcs.lib.booth.config_structure import ConfigItem
- 
--EXCHANGE_PRIMITIVES = ["authfile"]
--EXCHANGE_LISTS = [
--    ("site", "sites"),
--    ("arbitrator", "arbitrators"),
--    ("ticket", "tickets"),
--]
--
--
- def to_exchange_format(booth_configuration):
--    exchange_lists = dict(EXCHANGE_LISTS)
--    exchange = dict(
--        (exchange_key, []) for exchange_key in exchange_lists.values()
--    )
--
--    for key, value, _ in booth_configuration:
--        if key in exchange_lists:
--            exchange[exchange_lists[key]].append(value)
--        if key in EXCHANGE_PRIMITIVES:
--            exchange[key] = value
--
--    return exchange
-+    return [
-+        {
-+            "key": item.key,
-+            "value": item.value,
-+            "details": to_exchange_format(item.details),
-+        }
-+        for item in booth_configuration
-+    ]
- 
- 
- def from_exchange_format(exchange_format):
--    booth_config = []
--    for key in EXCHANGE_PRIMITIVES:
--        if key in exchange_format:
--            booth_config.append(ConfigItem(key, exchange_format[key]))
--
--    for key, exchange_key in EXCHANGE_LISTS:
--        booth_config.extend([
--            ConfigItem(key, value)
--            for value in exchange_format.get(exchange_key, [])
--        ])
--    return booth_config
-+    return [
-+        ConfigItem(
-+            item["key"],
-+            item["value"],
-+            from_exchange_format(item["details"]),
-+        )
-+        for item in exchange_format
-+    ]
-diff --git a/pcs/lib/booth/config_files.py b/pcs/lib/booth/config_files.py
-index aaad951..7b91379 100644
---- a/pcs/lib/booth/config_files.py
-+++ b/pcs/lib/booth/config_files.py
-@@ -24,10 +24,17 @@ def get_all_configs_file_names():
-     Returns list of all file names ending with '.conf' in booth configuration
-     directory.
-     """
-+    if not os.path.isdir(BOOTH_CONFIG_DIR):
-+        return []
-     return [
--        file_name for file_name in os.listdir(BOOTH_CONFIG_DIR)
--        if os.path.isfile(file_name) and file_name.endswith(".conf") and
--        len(file_name) > len(".conf")
-+        file_name
-+        for file_name in os.listdir(BOOTH_CONFIG_DIR)
-+        if
-+            file_name.endswith(".conf")
-+            and
-+            len(file_name) > len(".conf")
-+            and
-+            os.path.isfile(os.path.join(BOOTH_CONFIG_DIR, file_name))
-     ]
- 
- 
-@@ -55,7 +62,7 @@ def read_configs(reporter, skip_wrong_config=False):
-         try:
-             output[file_name] = _read_config(file_name)
-         except EnvironmentError:
--            report_list.append(reports.booth_config_unable_to_read(
-+            report_list.append(reports.booth_config_read_error(
-                 file_name,
-                 (
-                     ReportItemSeverity.WARNING if skip_wrong_config
-diff --git a/pcs/lib/booth/config_parser.py b/pcs/lib/booth/config_parser.py
-index 62d2203..bdc79fd 100644
---- a/pcs/lib/booth/config_parser.py
-+++ b/pcs/lib/booth/config_parser.py
-@@ -23,7 +23,8 @@ def parse(content):
-         )
- 
- def build(config_line_list):
--    return "\n".join(build_to_lines(config_line_list))
-+    newline = [""]
-+    return "\n".join(build_to_lines(config_line_list) + newline)
- 
- def build_to_lines(config_line_list, deep=0):
-     line_list = []
-diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py
-index c92f718..8977b7a 100644
---- a/pcs/lib/booth/config_structure.py
-+++ b/pcs/lib/booth/config_structure.py
-@@ -7,6 +7,7 @@ from __future__ import (
- 
- import re
- 
-+import pcs.lib.reports as common_reports
- from pcs.lib.booth import reports
- from pcs.lib.errors import LibraryError
- from collections import namedtuple
-@@ -66,6 +67,15 @@ def validate_peers(site_list, arbitrator_list):
-     if report:
-         raise LibraryError(*report)
- 
-+def take_peers(booth_configuration):
-+    return (
-+        pick_list_by_key(booth_configuration, "site"),
-+        pick_list_by_key(booth_configuration, "arbitrator"),
-+    )
-+
-+def pick_list_by_key(booth_configuration, key):
-+    return [item.value for item in booth_configuration if item.key == key]
-+
- def remove_ticket(booth_configuration, ticket_name):
-     validate_ticket_exists(booth_configuration, ticket_name)
-     return [
-@@ -73,11 +83,14 @@ def remove_ticket(booth_configuration, ticket_name):
-         if config_item.key != "ticket" or config_item.value != ticket_name
-     ]
- 
--def add_ticket(booth_configuration, ticket_name):
-+def add_ticket(booth_configuration, ticket_name, options):
-     validate_ticket_name(ticket_name)
-     validate_ticket_unique(booth_configuration, ticket_name)
-+    validate_ticket_options(options)
-     return booth_configuration + [
--        ConfigItem("ticket", ticket_name)
-+        ConfigItem("ticket", ticket_name, [
-+            ConfigItem(key, value) for key, value in options.items()
-+        ])
-     ]
- 
- def validate_ticket_exists(booth_configuration, ticket_name):
-@@ -88,6 +101,24 @@ def validate_ticket_unique(booth_configuration, ticket_name):
-     if ticket_exists(booth_configuration, ticket_name):
-         raise LibraryError(reports.booth_ticket_duplicate(ticket_name))
- 
-+def validate_ticket_options(options):
-+    reports = []
-+    for key in sorted(options):
-+        if key in GLOBAL_KEYS:
-+            reports.append(
-+                common_reports.invalid_option(key, TICKET_KEYS, "booth ticket")
-+            )
-+
-+        if not options[key].strip():
-+            reports.append(common_reports.invalid_option_value(
-+                key,
-+                options[key],
-+                "no-empty",
-+            ))
-+
-+    if reports:
-+        raise LibraryError(*reports)
-+
- def ticket_exists(booth_configuration, ticket_name):
-     return any(
-         value for key, value, _ in booth_configuration
-diff --git a/pcs/lib/booth/reports.py b/pcs/lib/booth/reports.py
-index 8a804e0..6aa9d3d 100644
---- a/pcs/lib/booth/reports.py
-+++ b/pcs/lib/booth/reports.py
-@@ -197,22 +197,17 @@ def booth_multiple_times_in_cib(
-     )
- 
- 
--def booth_distributing_config(name=None):
-+def booth_config_distribution_started():
-     """
--    Sending booth config to all nodes in cluster.
--
--    name -- name of booth instance
-+    booth configuration is about to be sent to nodes
-     """
-     return ReportItem.info(
--        report_codes.BOOTH_DISTRIBUTING_CONFIG,
--        "Sending booth config{0} to all cluster nodes.".format(
--            " ({name})" if name and name != "booth" else ""
--        ),
--        info={"name": name}
-+        report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+        "Sending booth configuration to cluster nodes..."
-     )
- 
- 
--def booth_config_saved(node=None, name_list=None):
-+def booth_config_accepted_by_node(node=None, name_list=None):
-     """
-     Booth config has been saved on specified node.
- 
-@@ -229,7 +224,7 @@ def booth_config_saved(node=None, name_list=None):
-         msg = "Booth config saved."
-         name = None
-     return ReportItem.info(
--        report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+        report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-         msg if node is None else "{node}: " + msg,
-         info={
-             "node": node,
-@@ -239,30 +234,7 @@ def booth_config_saved(node=None, name_list=None):
-     )
- 
- 
--def booth_config_unable_to_read(
--    name, severity=ReportItemSeverity.ERROR, forceable=None
--):
--    """
--    Unable to read from specified booth instance config.
--
--    name -- name of booth instance
--    severity -- severity of report item
--    forceable -- is this report item forceable? by what category?
--    """
--    if name and name != "booth":
--        msg = "Unable to read booth config ({name})."
--    else:
--        msg = "Unable to read booth config."
--    return ReportItem(
--        report_codes.BOOTH_CONFIG_READ_ERROR,
--        severity,
--        msg,
--        info={"name": name},
--        forceable=forceable
--    )
--
--
--def booth_config_not_saved(node, reason, name=None):
-+def booth_config_distribution_node_error(node, reason, name=None):
-     """
-     Saving booth config failed on specified node.
- 
-@@ -275,7 +247,7 @@ def booth_config_not_saved(node, reason, name=None):
-     else:
-         msg = "Unable to save booth config on node '{node}': {reason}"
-     return ReportItem.error(
--        report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+        report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
-         msg,
-         info={
-             "node": node,
-@@ -285,20 +257,36 @@ def booth_config_not_saved(node, reason, name=None):
-     )
- 
- 
--def booth_sending_local_configs_to_node(node):
-+def booth_config_read_error(
-+    name, severity=ReportItemSeverity.ERROR, forceable=None
-+):
-     """
--    Sending all local booth configs to node
-+    Unable to read from specified booth instance config.
- 
--    node -- node name
-+    name -- name of booth instance
-+    severity -- severity of report item
-+    forceable -- is this report item forceable? by what category?
-     """
--    return ReportItem.info(
--        report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
--        "{node}: Saving booth config(s)...",
--        info={"node": node}
-+    if name and name != "booth":
-+        msg = "Unable to read booth config ({name})."
-+    else:
-+        msg = "Unable to read booth config."
-+    return ReportItem(
-+        report_codes.BOOTH_CONFIG_READ_ERROR,
-+        severity,
-+        msg,
-+        info={"name": name},
-+        forceable=forceable
-     )
- 
- 
--def booth_fetching_config_from_node(node, config=None):
-+def booth_fetching_config_from_node_started(node, config=None):
-+    """
-+    fetching of booth config from specified node started
-+
-+    node -- node from which config is fetching
-+    config -- config name
-+    """
-     if config or config == 'booth':
-         msg = "Fetching booth config from node '{node}'..."
-     else:
-@@ -314,6 +302,12 @@ def booth_fetching_config_from_node(node, config=None):
- 
- 
- def booth_unsupported_file_location(file):
-+    """
-+    location of booth configuration file (config, authfile) file is not
-+    supported (not in /etc/booth/)
-+
-+    file -- file path
-+    """
-     return ReportItem.warning(
-         report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
-         "skipping file {file}: unsupported file location",
-@@ -322,6 +316,11 @@ def booth_unsupported_file_location(file):
- 
- 
- def booth_daemon_status_error(reason):
-+    """
-+    Unable to get status of booth daemon because of error.
-+
-+    reason -- reason
-+    """
-     return ReportItem.error(
-         report_codes.BOOTH_DAEMON_STATUS_ERROR,
-         "unable to get status of booth daemon: {reason}",
-@@ -330,6 +329,11 @@ def booth_daemon_status_error(reason):
- 
- 
- def booth_tickets_status_error(reason=None):
-+    """
-+    Unable to get status of booth tickets because of error.
-+
-+    reason -- reason
-+    """
-     return ReportItem.error(
-         report_codes.BOOTH_TICKET_STATUS_ERROR,
-         "unable to get status of booth tickets",
-@@ -340,6 +344,11 @@ def booth_tickets_status_error(reason=None):
- 
- 
- def booth_peers_status_error(reason=None):
-+    """
-+    Unable to get status of booth peers because of error.
-+
-+    reason -- reason
-+    """
-     return ReportItem.error(
-         report_codes.BOOTH_PEERS_STATUS_ERROR,
-         "unable to get status of booth peers",
-diff --git a/pcs/lib/booth/resource.py b/pcs/lib/booth/resource.py
-index e793713..a4b7b1e 100644
---- a/pcs/lib/booth/resource.py
-+++ b/pcs/lib/booth/resource.py
-@@ -8,18 +8,12 @@ from __future__ import (
- from pcs.lib.cib.tools import find_unique_id
- 
- 
--class BoothNotFoundInCib(Exception):
--    pass
--
--class BoothMultipleOccurenceFoundInCib(Exception):
--    pass
--
- def create_resource_id(resources_section, name, suffix):
-     return find_unique_id(
-         resources_section.getroottree(), "booth-{0}-{1}".format(name, suffix)
-     )
- 
--def get_creator(resource_create):
-+def get_creator(resource_create, resource_remove=None):
-     #TODO resource_create  is provisional hack until resources are not moved to
-     #lib
-     def create_booth_in_cluster(ip, booth_config_file_path, create_id):
-@@ -36,15 +30,18 @@ def get_creator(resource_create):
-             clone_opts=[],
-             group=group_id,
-         )
--        resource_create(
--            ra_id=booth_id,
--            ra_type="ocf:pacemaker:booth-site",
--            ra_values=["config={0}".format(booth_config_file_path)],
--            op_values=[],
--            meta_values=[],
--            clone_opts=[],
--            group=group_id,
--        )
-+        try:
-+            resource_create(
-+                ra_id=booth_id,
-+                ra_type="ocf:pacemaker:booth-site",
-+                ra_values=["config={0}".format(booth_config_file_path)],
-+                op_values=[],
-+                meta_values=[],
-+                clone_opts=[],
-+                group=group_id,
-+            )
-+        except SystemExit:
-+            resource_remove(ip_id)
-     return create_booth_in_cluster
- 
- def is_ip_resource(resource_element):
-@@ -64,28 +61,12 @@ def find_grouped_ip_element_to_remove(booth_element):
-     return None
- 
- def get_remover(resource_remove):
--    def remove_from_cluster(
--        resources_section, booth_config_file_path, remove_multiple=False
--    ):
--        element_list = find_for_config(
--            resources_section,
--            booth_config_file_path
--        )
--        if not element_list:
--            raise BoothNotFoundInCib()
--
--        if len(element_list) > 1 and not remove_multiple:
--            raise BoothMultipleOccurenceFoundInCib()
--
--        number_of_removed_booth_elements = 0
--        for element in element_list:
-+    def remove_from_cluster(booth_element_list):
-+        for element in booth_element_list:
-             ip_resource_to_remove = find_grouped_ip_element_to_remove(element)
-             if ip_resource_to_remove is not None:
-                 resource_remove(ip_resource_to_remove.attrib["id"])
-             resource_remove(element.attrib["id"])
--            number_of_removed_booth_elements += 1
--
--        return number_of_removed_booth_elements
- 
-     return remove_from_cluster
- 
-diff --git a/pcs/lib/booth/sync.py b/pcs/lib/booth/sync.py
-index c9bc30b..374b96d 100644
---- a/pcs/lib/booth/sync.py
-+++ b/pcs/lib/booth/sync.py
-@@ -57,7 +57,7 @@ def _set_config_on_node(
-         "remote/booth_set_config",
-         NodeCommunicator.format_data_dict([("data_json", json.dumps(data))])
-     )
--    reporter.process(reports.booth_config_saved(node.label, [name]))
-+    reporter.process(reports.booth_config_accepted_by_node(node.label, [name]))
- 
- 
- def send_config_to_all_nodes(
-@@ -77,7 +77,7 @@ def send_config_to_all_nodes(
-     authfile_data -- content of authfile as bytes
-     skip_offline -- if True offline nodes will be skipped
-     """
--    reporter.process(reports.booth_distributing_config(name))
-+    reporter.process(reports.booth_config_distribution_started())
-     parallel_nodes_communication_helper(
-         _set_config_on_node,
-         [
-@@ -115,6 +115,9 @@ def send_all_config_to_node(
-     config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
-     if not config_dict:
-         return
-+
-+    reporter.process(reports.booth_config_distribution_started())
-+
-     file_list = []
-     for config, config_data in sorted(config_dict.items()):
-         try:
-@@ -145,7 +148,6 @@ def send_all_config_to_node(
-     if rewrite_existing:
-         data.append(("rewrite_existing", "1"))
- 
--    reporter.process(reports.booth_sending_local_configs_to_node(node.label))
-     try:
-         response = json.loads(communicator.call_node(
-             node,
-@@ -165,12 +167,12 @@ def send_all_config_to_node(
-                 node.label
-             ))
-         for file, reason in response["failed"].items():
--            report_list.append(reports.booth_config_not_saved(
-+            report_list.append(reports.booth_config_distribution_node_error(
-                 node.label, reason, file
-             ))
-         reporter.process_list(report_list)
-         reporter.process(
--            reports.booth_config_saved(node.label, response["saved"])
-+            reports.booth_config_accepted_by_node(node.label, response["saved"])
-         )
-     except NodeCommunicationException as e:
-         raise LibraryError(node_communicator_exception_to_report_item(e))
-diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py
-index a9a40ce..eb1885c 100644
---- a/pcs/lib/booth/test/test_config_exchange.py
-+++ b/pcs/lib/booth/test/test_config_exchange.py
-@@ -17,47 +17,35 @@ class FromExchangeFormatTest(TestCase):
-                 config_structure.ConfigItem("site", "2.2.2.2"),
-                 config_structure.ConfigItem("arbitrator", "3.3.3.3"),
-                 config_structure.ConfigItem("ticket", "TA"),
--                config_structure.ConfigItem("ticket", "TB"),
-+                config_structure.ConfigItem("ticket", "TB", [
-+                    config_structure.ConfigItem("expire", "10")
-+                ]),
-             ],
--            config_exchange.from_exchange_format(
--                {
--                    "sites": ["1.1.1.1", "2.2.2.2"],
--                    "arbitrators": ["3.3.3.3"],
--                    "tickets": ["TA", "TB"],
--                    "authfile": "/path/to/auth.file",
--                },
--            )
-+            config_exchange.from_exchange_format([
-+                {"key": "authfile","value": "/path/to/auth.file","details": []},
-+                {"key": "site", "value": "1.1.1.1", "details": []},
-+                {"key": "site", "value": "2.2.2.2", "details": []},
-+                {"key": "arbitrator", "value": "3.3.3.3", "details": []},
-+                {"key": "ticket", "value": "TA", "details": []},
-+                {"key": "ticket", "value": "TB", "details": [
-+                    {"key": "expire", "value": "10", "details": []}
-+                ]},
-+            ])
-         )
- 
- 
- class GetExchenageFormatTest(TestCase):
-     def test_convert_parsed_config_to_exchange_format(self):
-         self.assertEqual(
--            {
--                "sites": ["1.1.1.1", "2.2.2.2"],
--                "arbitrators": ["3.3.3.3"],
--                "tickets": ["TA", "TB"],
--                "authfile": "/path/to/auth.file",
--            },
--            config_exchange.to_exchange_format([
--                config_structure.ConfigItem("site", "1.1.1.1"),
--                config_structure.ConfigItem("site", "2.2.2.2"),
--                config_structure.ConfigItem("arbitrator", "3.3.3.3"),
--                config_structure.ConfigItem("authfile", "/path/to/auth.file"),
--                config_structure.ConfigItem("ticket", "TA"),
--                config_structure.ConfigItem("ticket", "TB", [
--                    config_structure.ConfigItem("timeout", "10")
--                ]),
--            ])
--        )
--
--    def test_convert_parsed_config_to_exchange_format_without_authfile(self):
--        self.assertEqual(
--            {
--                "sites": ["1.1.1.1", "2.2.2.2"],
--                "arbitrators": ["3.3.3.3"],
--                "tickets": ["TA", "TB"],
--            },
-+            [
-+                {"key": "site", "value": "1.1.1.1", "details": []},
-+                {"key": "site", "value": "2.2.2.2", "details": []},
-+                {"key": "arbitrator", "value": "3.3.3.3", "details": []},
-+                {"key": "ticket", "value": "TA", "details": []},
-+                {"key": "ticket", "value": "TB", "details": [
-+                    {"key": "timeout", "value": "10", "details": []}
-+                ]},
-+            ],
-             config_exchange.to_exchange_format([
-                 config_structure.ConfigItem("site", "1.1.1.1"),
-                 config_structure.ConfigItem("site", "2.2.2.2"),
-diff --git a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py
-index 2d4c3ea..8266cac 100644
---- a/pcs/lib/booth/test/test_config_files.py
-+++ b/pcs/lib/booth/test/test_config_files.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from os.path import join
-+import os.path
- from unittest import TestCase
- 
- from pcs.common import report_codes, env_file_role_codes as file_roles
-@@ -21,20 +21,38 @@ def patch_config_files(target, *args, **kwargs):
-         "pcs.lib.booth.config_files.{0}".format(target), *args, **kwargs
-     )
- 
-+@mock.patch("os.path.isdir")
- @mock.patch("os.listdir")
- @mock.patch("os.path.isfile")
- class GetAllConfigsFileNamesTest(TestCase):
--    def test_success(self, mock_is_file, mock_listdir):
-+    def test_booth_config_dir_is_no_dir(
-+        self, mock_is_file, mock_listdir, mock_isdir
-+    ):
-+        mock_isdir.return_value = False
-+        self.assertEqual([], config_files.get_all_configs_file_names())
-+        mock_isdir.assert_called_once_with(BOOTH_CONFIG_DIR)
-+        self.assertEqual(0, mock_is_file.call_count)
-+        self.assertEqual(0, mock_listdir.call_count)
-+
-+    def test_success(self, mock_is_file, mock_listdir, mock_isdir):
-         def mock_is_file_fn(file_name):
--            if file_name in ["dir.cong", "dir"]:
-+            if file_name in [
-+                os.path.join(BOOTH_CONFIG_DIR, name)
-+                for name in ("dir.cong", "dir")
-+            ]:
-                 return False
-             elif file_name in [
--                "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf"
-+                os.path.join(BOOTH_CONFIG_DIR, name)
-+                for name in (
-+                    "name1", "name2.conf", "name.conf.conf", ".conf",
-+                    "name3.conf"
-+                )
-             ]:
-                 return True
-             else:
-                 raise AssertionError("unexpected input")
- 
-+        mock_isdir.return_value = True
-         mock_is_file.side_effect = mock_is_file_fn
-         mock_listdir.return_value = [
-             "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf",
-@@ -59,7 +77,7 @@ class ReadConfigTest(TestCase):
- 
-         self.assertEqual(
-             [
--                mock.call(join(BOOTH_CONFIG_DIR, "my-file.conf"), "r"),
-+                mock.call(os.path.join(BOOTH_CONFIG_DIR, "my-file.conf"), "r"),
-                 mock.call().__enter__(),
-                 mock.call().read(),
-                 mock.call().__exit__(None, None, None)
-@@ -193,7 +211,7 @@ class ReadAuthfileTest(TestCase):
-         self.maxDiff = None
- 
-     def test_success(self):
--        path = join(BOOTH_CONFIG_DIR, "file.key")
-+        path = os.path.join(BOOTH_CONFIG_DIR, "file.key")
-         mock_open = mock.mock_open(read_data="key")
- 
-         with patch_config_files("open", mock_open, create=True):
-@@ -248,7 +266,7 @@ class ReadAuthfileTest(TestCase):
- 
-     @patch_config_files("format_environment_error", return_value="reason")
-     def test_read_failure(self, _):
--        path = join(BOOTH_CONFIG_DIR, "file.key")
-+        path = os.path.join(BOOTH_CONFIG_DIR, "file.key")
-         mock_open = mock.mock_open()
-         mock_open().read.side_effect = EnvironmentError()
- 
-diff --git a/pcs/lib/booth/test/test_config_parser.py b/pcs/lib/booth/test/test_config_parser.py
-index 684fc79..c04f451 100644
---- a/pcs/lib/booth/test/test_config_parser.py
-+++ b/pcs/lib/booth/test/test_config_parser.py
-@@ -24,6 +24,7 @@ class BuildTest(TestCase):
-                 'ticket = "TA"',
-                 'ticket = "TB"',
-                 "  timeout = 10",
-+                "", #newline at the end
-             ]),
-             config_parser.build([
-                 ConfigItem("authfile", "/path/to/auth.file"),
-@@ -105,6 +106,7 @@ class ParseRawLinesTest(TestCase):
-                 "arbitrator=3.3.3.3",
-                 "syntactically_correct = nonsense",
-                 "line-with = hash#literal",
-+                "",
-             ]))
-         )
- 
-diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
-index 27faca5..1dd07cb 100644
---- a/pcs/lib/booth/test/test_config_structure.py
-+++ b/pcs/lib/booth/test/test_config_structure.py
-@@ -47,6 +47,46 @@ class ValidateTicketUniqueTest(TestCase):
-     def test_do_not_raises_when_no_duplicated_ticket(self):
-         config_structure.validate_ticket_unique([], "A")
- 
-+class ValidateTicketOptionsTest(TestCase):
-+    def test_raises_on_invalid_options(self):
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_ticket_options({
-+                "site": "a",
-+                "port": "b",
-+                "timeout": " ",
-+            }),
-+            (
-+                severities.ERROR,
-+                report_codes.INVALID_OPTION,
-+                {
-+                    "option_name": "site",
-+                    "option_type": "booth ticket",
-+                    "allowed": list(config_structure.TICKET_KEYS),
-+                },
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.INVALID_OPTION,
-+                {
-+                    "option_name": "port",
-+                    "option_type": "booth ticket",
-+                    "allowed": list(config_structure.TICKET_KEYS),
-+                },
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.INVALID_OPTION_VALUE,
-+                {
-+                    "option_name": "timeout",
-+                    "option_value": " ",
-+                    "allowed_values": "no-empty",
-+                },
-+            ),
-+        )
-+
-+    def test_success_on_valid_options(self):
-+        config_structure.validate_ticket_options({"timeout": "10"})
-+
- class TicketExistsTest(TestCase):
-     def test_returns_true_if_ticket_in_structure(self):
-         self.assertTrue(config_structure.ticket_exists(
-@@ -183,10 +223,14 @@ class AddTicketTest(TestCase):
-             config_structure.ConfigItem("ticket", "some-ticket"),
-         ]
-         self.assertEqual(
--            config_structure.add_ticket(configuration, "new-ticket"),
-+            config_structure.add_ticket(configuration, "new-ticket", {
-+                "timeout": "10",
-+            }),
-             [
-                 config_structure.ConfigItem("ticket", "some-ticket"),
--                config_structure.ConfigItem("ticket", "new-ticket"),
-+                config_structure.ConfigItem("ticket", "new-ticket", [
-+                    config_structure.ConfigItem("timeout", "10"),
-+                ]),
-             ],
-         )
- 
-@@ -222,3 +266,21 @@ class SetAuthfileTest(TestCase):
-                 "/path/to/auth.file"
-             )
-         )
-+
-+class TakePeersTest(TestCase):
-+    def test_returns_site_list_and_arbitrators_list(self):
-+        self.assertEqual(
-+            (
-+                ["1.1.1.1", "2.2.2.2", "3.3.3.3"],
-+                ["4.4.4.4", "5.5.5.5"]
-+            ),
-+            config_structure.take_peers(
-+                [
-+                    config_structure.ConfigItem("site", "1.1.1.1"),
-+                    config_structure.ConfigItem("site", "2.2.2.2"),
-+                    config_structure.ConfigItem("site", "3.3.3.3"),
-+                    config_structure.ConfigItem("arbitrator", "4.4.4.4"),
-+                    config_structure.ConfigItem("arbitrator", "5.5.5.5"),
-+                ],
-+            )
-+        )
-diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py
-index 440ddde..dd72c1e 100644
---- a/pcs/lib/booth/test/test_resource.py
-+++ b/pcs/lib/booth/test/test_resource.py
-@@ -11,6 +11,7 @@ from lxml import etree
- 
- import pcs.lib.booth.resource as booth_resource
- from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.misc import get_test_resource as rc
- 
- 
- def fixture_resources_with_booth(booth_config_file_path):
-@@ -85,73 +86,24 @@ class FindBoothResourceElementsTest(TestCase):
-         )
- 
- class RemoveFromClusterTest(TestCase):
--    def call(self, resources_section, remove_multiple=False):
-+    def call(self, element_list):
-         mock_resource_remove = mock.Mock()
--        num_of_removed_booth_resources = booth_resource.get_remover(
--            mock_resource_remove
--        )(
--            resources_section,
--            "/PATH/TO/CONF",
--            remove_multiple,
--        )
--        return (
--            mock_resource_remove,
--            num_of_removed_booth_resources
--        )
--
--    def fixture_resources_including_two_booths(self):
--        resources_section = etree.fromstring('<resources/>')
--        first = fixture_booth_element("first", "/PATH/TO/CONF")
--        second = fixture_booth_element("second", "/PATH/TO/CONF")
--        resources_section.append(first)
--        resources_section.append(second)
--        return resources_section
--
--    def test_raises_when_booth_resource_not_found(self):
--        self.assertRaises(
--            booth_resource.BoothNotFoundInCib,
--            lambda: self.call(etree.fromstring('<resources/>')),
--        )
--
--    def test_raises_when_more_booth_resources_found(self):
--        resources_section = self.fixture_resources_including_two_booths()
--        self.assertRaises(
--            booth_resource.BoothMultipleOccurenceFoundInCib,
--            lambda: self.call(resources_section),
--        )
--
--    def test_returns_number_of_removed_elements(self):
--        resources_section = self.fixture_resources_including_two_booths()
--        mock_resource_remove, num_of_removed_booth_resources = self.call(
--            resources_section,
--            remove_multiple=True
--        )
--        self.assertEqual(num_of_removed_booth_resources, 2)
--        self.assertEqual(
--            mock_resource_remove.mock_calls, [
--                mock.call('first'),
--                mock.call('second'),
--            ]
--        )
-+        booth_resource.get_remover(mock_resource_remove)(element_list)
-+        return mock_resource_remove
- 
-     def test_remove_ip_when_is_only_booth_sibling_in_group(self):
--        resources_section = etree.fromstring('''
--            <resources>
--                <group>
--                    <primitive id="ip" type="IPaddr2"/>
--                    <primitive id="booth" type="booth-site">
--                        <instance_attributes>
--                            <nvpair name="config" value="/PATH/TO/CONF"/>
--                        </instance_attributes>
--                    </primitive>
--                </group>
--            </resources>
-+        group = etree.fromstring('''
-+            <group>
-+                <primitive id="ip" type="IPaddr2"/>
-+                <primitive id="booth" type="booth-site">
-+                    <instance_attributes>
-+                        <nvpair name="config" value="/PATH/TO/CONF"/>
-+                    </instance_attributes>
-+                </primitive>
-+            </group>
-         ''')
- 
--        mock_resource_remove, _ = self.call(
--            resources_section,
--            remove_multiple=True
--        )
-+        mock_resource_remove = self.call(group.getchildren()[1:])
-         self.assertEqual(
-             mock_resource_remove.mock_calls, [
-                 mock.call('ip'),
-@@ -159,6 +111,41 @@ class RemoveFromClusterTest(TestCase):
-             ]
-         )
- 
-+class CreateInClusterTest(TestCase):
-+    def test_remove_ip_when_booth_resource_add_failed(self):
-+        mock_resource_create = mock.Mock(side_effect=[None, SystemExit(1)])
-+        mock_resource_remove = mock.Mock()
-+        mock_create_id = mock.Mock(side_effect=["ip_id","booth_id","group_id"])
-+        ip = "1.2.3.4"
-+        booth_config_file_path = rc("/path/to/booth.conf")
-+
-+        booth_resource.get_creator(mock_resource_create, mock_resource_remove)(
-+            ip,
-+            booth_config_file_path,
-+            mock_create_id
-+        )
-+        self.assertEqual(mock_resource_create.mock_calls, [
-+            mock.call(
-+                clone_opts=[],
-+                group=u'group_id',
-+                meta_values=[],
-+                op_values=[],
-+                ra_id=u'ip_id',
-+                ra_type=u'ocf:heartbeat:IPaddr2',
-+                ra_values=[u'ip=1.2.3.4'],
-+            ),
-+            mock.call(
-+                clone_opts=[],
-+                group='group_id',
-+                meta_values=[],
-+                op_values=[],
-+                ra_id='booth_id',
-+                ra_type='ocf:pacemaker:booth-site',
-+                ra_values=['config=/path/to/booth.conf'],
-+            )
-+        ])
-+        mock_resource_remove.assert_called_once_with("ip_id")
-+
- 
- class FindBindedIpTest(TestCase):
-     def fixture_resource_section(self, ip_element_list):
-diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py
-index 58500cc..9ba6e80 100644
---- a/pcs/lib/booth/test/test_sync.py
-+++ b/pcs/lib/booth/test/test_sync.py
-@@ -74,7 +74,7 @@ class SetConfigOnNodeTest(TestCase):
-             self.mock_rep.report_item_list,
-             [(
-                 Severities.INFO,
--                report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                 {
-                     "node": self.node.label,
-                     "name": "cfg_name",
-@@ -104,7 +104,7 @@ class SetConfigOnNodeTest(TestCase):
-             self.mock_rep.report_item_list,
-             [(
-                 Severities.INFO,
--                report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                 {
-                     "node": self.node.label,
-                     "name": "cfg_name",
-@@ -175,8 +175,8 @@ class SyncConfigInCluster(TestCase):
-             self.mock_reporter.report_item_list,
-             [(
-                 Severities.INFO,
--                report_codes.BOOTH_DISTRIBUTING_CONFIG,
--                {"name": "cfg_name"}
-+                report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                {}
-             )]
-         )
- 
-@@ -213,8 +213,8 @@ class SyncConfigInCluster(TestCase):
-             self.mock_reporter.report_item_list,
-             [(
-                 Severities.INFO,
--                report_codes.BOOTH_DISTRIBUTING_CONFIG,
--                {"name": "cfg_name"}
-+                report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                {}
-             )]
-         )
- 
-@@ -252,8 +252,8 @@ class SyncConfigInCluster(TestCase):
-             self.mock_reporter.report_item_list,
-             [(
-                 Severities.INFO,
--                report_codes.BOOTH_DISTRIBUTING_CONFIG,
--                {"name": "cfg_name"}
-+                report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                {}
-             )]
-         )
- 
-@@ -375,12 +375,12 @@ class SendAllConfigToNodeTest(TestCase):
-             [
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
--                    {"node": self.node.label}
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                    {}
-                 ),
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                     {
-                         "node": self.node.label,
-                         "name": "name1.conf, file1.key, name2.conf, file2.key",
-@@ -489,8 +489,8 @@ class SendAllConfigToNodeTest(TestCase):
-             [
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
--                    {"node": self.node.label}
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                    {}
-                 ),
-                 (
-                     Severities.ERROR,
-@@ -593,8 +593,8 @@ class SendAllConfigToNodeTest(TestCase):
-             [
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
--                    {"node": self.node.label}
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                    {}
-                 ),
-                 (
-                     Severities.WARNING,
-@@ -616,7 +616,7 @@ class SendAllConfigToNodeTest(TestCase):
-                 ),
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                     {
-                         "node": self.node.label,
-                         "name": "name2.conf, file2.key",
-@@ -652,7 +652,7 @@ class SendAllConfigToNodeTest(TestCase):
-             ),
-             (
-                 Severities.ERROR,
--                report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
-                 {
-                     "node": self.node.label,
-                     "name": "name1.conf",
-@@ -661,7 +661,7 @@ class SendAllConfigToNodeTest(TestCase):
-             ),
-             (
-                 Severities.ERROR,
--                report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
-                 {
-                     "node": self.node.label,
-                     "name": "file1.key",
-@@ -724,12 +724,12 @@ class SendAllConfigToNodeTest(TestCase):
-             [
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
--                    {"node": self.node.label}
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                    {}
-                 ),
-                 (
-                     Severities.ERROR,
--                    report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
-                     {
-                         "node": self.node.label,
-                         "name": "name1.conf",
-@@ -738,7 +738,7 @@ class SendAllConfigToNodeTest(TestCase):
-                 ),
-                 (
-                     Severities.ERROR,
--                    report_codes.BOOTH_CONFIG_WRITE_ERROR,
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
-                     {
-                         "node": self.node.label,
-                         "name": "file1.key",
-@@ -1058,12 +1058,12 @@ class SendAllConfigToNodeTest(TestCase):
-             [
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
--                    {"node": self.node.label}
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                    {}
-                 ),
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                     {
-                         "node": self.node.label,
-                         "name": "name1.conf, name2.conf, file2.key",
-@@ -1143,8 +1143,8 @@ class SendAllConfigToNodeTest(TestCase):
-             [
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVING_ON_NODE,
--                    {"node": self.node.label}
-+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-+                    {}
-                 ),
-                 (
-                     Severities.WARNING,
-@@ -1155,7 +1155,7 @@ class SendAllConfigToNodeTest(TestCase):
-                 ),
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                     {
-                         "node": self.node.label,
-                         "name": "name2.conf, file2.key",
-diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py
-index 43ea9dd..7a3d348 100644
---- a/pcs/lib/commands/booth.py
-+++ b/pcs/lib/commands/booth.py
-@@ -34,11 +34,10 @@ def config_setup(env, booth_configuration, overwrite_existing=False):
-     list arbitrator_list contains arbitrator adresses of multisite
-     """
- 
-+    config_content = config_exchange.from_exchange_format(booth_configuration)
-     config_structure.validate_peers(
--        booth_configuration.get("sites", []),
--        booth_configuration.get("arbitrators", [])
-+        *config_structure.take_peers(config_content)
-     )
--    config_content = config_exchange.from_exchange_format(booth_configuration)
- 
-     env.booth.create_key(config_files.generate_key(), overwrite_existing)
-     config_content = config_structure.set_authfile(
-@@ -99,21 +98,34 @@ def config_destroy(env, ignore_config_load_problems=False):
-         env.booth.remove_key()
-     env.booth.remove_config()
- 
--def config_show(env):
-+
-+def config_text(env, name, node_name=None):
-     """
--    return configuration as tuple of sites list and arbitrators list
-+    get configuration in raw format
-+    string name -- name of booth instance whose config should be returned
-+    string node_name -- get the config from specified node or local host if None
-     """
--    return config_exchange.to_exchange_format(
--        parse(env.booth.get_config_content())
-+    if node_name is None:
-+        # TODO add name support
-+        return env.booth.get_config_content()
-+
-+    remote_data = sync.pull_config_from_node(
-+        env.node_communicator(), NodeAddresses(node_name), name
-     )
-+    try:
-+        return remote_data["config"]["data"]
-+    except KeyError:
-+        raise LibraryError(reports.invalid_response_format(node_name))
- 
--def config_ticket_add(env, ticket_name):
-+
-+def config_ticket_add(env, ticket_name, options):
-     """
-     add ticket to booth configuration
-     """
-     booth_configuration = config_structure.add_ticket(
-         parse(env.booth.get_config_content()),
--        ticket_name
-+        ticket_name,
-+        options,
-     )
-     env.booth.push_config(build(booth_configuration))
- 
-@@ -127,7 +139,7 @@ def config_ticket_remove(env, ticket_name):
-     )
-     env.booth.push_config(build(booth_configuration))
- 
--def create_in_cluster(env, name, ip, resource_create):
-+def create_in_cluster(env, name, ip, resource_create, resource_remove):
-     #TODO resource_create is provisional hack until resources are not moved to
-     #lib
-     resources_section = get_resources(env.get_cib())
-@@ -136,7 +148,7 @@ def create_in_cluster(env, name, ip, resource_create):
-     if resource.find_for_config(resources_section, booth_config_file_path):
-         raise LibraryError(booth_reports.booth_already_in_cib(name))
- 
--    resource.get_creator(resource_create)(
-+    resource.get_creator(resource_create, resource_remove)(
-         ip,
-         booth_config_file_path,
-         create_id = partial(
-@@ -146,25 +158,20 @@ def create_in_cluster(env, name, ip, resource_create):
-         )
-     )
- 
--def remove_from_cluster(env, name, resource_remove):
-+def remove_from_cluster(env, name, resource_remove, allow_remove_multiple):
-     #TODO resource_remove is provisional hack until resources are not moved to
-     #lib
--    try:
--        num_of_removed_booth_resources = resource.get_remover(resource_remove)(
--            get_resources(env.get_cib()),
--            get_config_file_name(name),
--        )
--        if num_of_removed_booth_resources > 1:
--            env.report_processor.process(
--                booth_reports.booth_multiple_times_in_cib(
--                    name,
--                    severity=ReportItemSeverity.WARNING,
--                )
--            )
--    except resource.BoothNotFoundInCib:
--        raise LibraryError(booth_reports.booth_not_exists_in_cib(name))
--    except resource.BoothMultipleOccurenceFoundInCib:
--        raise LibraryError(booth_reports.booth_multiple_times_in_cib(name))
-+    resource.get_remover(resource_remove)(
-+        _find_resource_elements_for_operation(env, name, allow_remove_multiple)
-+    )
-+
-+def restart(env, name, resource_restart, allow_multiple):
-+    #TODO resource_restart is provisional hack until resources are not moved to
-+    #lib
-+    for booth_element in _find_resource_elements_for_operation(
-+        env, name, allow_multiple
-+    ):
-+        resource_restart([booth_element.attrib["id"]])
- 
- def ticket_operation(operation, env, name, ticket, site_ip):
-     if not site_ip:
-@@ -314,7 +321,7 @@ def pull_config(env, node_name, name):
-     name -- string, name of booth instance of which config should be fetched
-     """
-     env.report_processor.process(
--        booth_reports.booth_fetching_config_from_node(node_name, name)
-+        booth_reports.booth_fetching_config_from_node_started(node_name, name)
-     )
-     output = sync.pull_config_from_node(
-         env.node_communicator(), NodeAddresses(node_name), name
-@@ -335,7 +342,7 @@ def pull_config(env, node_name, name):
-                 True
-             )
-         env.report_processor.process(
--            booth_reports.booth_config_saved(name_list=[name])
-+            booth_reports.booth_config_accepted_by_node(name_list=[name])
-         )
-     except KeyError:
-         raise LibraryError(reports.invalid_response_format(node_name))
-@@ -347,3 +354,24 @@ def get_status(env, name=None):
-         "ticket": status.get_tickets_status(env.cmd_runner(), name),
-         "peers": status.get_peers_status(env.cmd_runner(), name),
-     }
-+
-+def _find_resource_elements_for_operation(env, name, allow_multiple):
-+    booth_element_list = resource.find_for_config(
-+        get_resources(env.get_cib()),
-+        get_config_file_name(name),
-+    )
-+
-+    if not booth_element_list:
-+        raise LibraryError(booth_reports.booth_not_exists_in_cib(name))
-+
-+    if len(booth_element_list) > 1:
-+        if not allow_multiple:
-+            raise LibraryError(booth_reports.booth_multiple_times_in_cib(name))
-+        env.report_processor.process(
-+            booth_reports.booth_multiple_times_in_cib(
-+                name,
-+                severity=ReportItemSeverity.WARNING,
-+            )
-+        )
-+
-+    return booth_element_list
-diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
-index 20bf06a..d2429b6 100644
---- a/pcs/lib/commands/test/test_booth.py
-+++ b/pcs/lib/commands/test/test_booth.py
-@@ -19,7 +19,6 @@ from pcs.test.tools.assertions import (
- 
- from pcs import settings
- from pcs.common import report_codes
--from pcs.lib.booth import resource as booth_resource
- from pcs.lib.env import LibraryEnvironment
- from pcs.lib.node import NodeAddresses
- from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
-@@ -48,10 +47,10 @@ class ConfigSetupTest(TestCase):
-         env = mock.MagicMock()
-         commands.config_setup(
-             env,
--            booth_configuration={
--                "sites": ["1.1.1.1"],
--                "arbitrators": ["2.2.2.2"],
--            },
-+            booth_configuration=[
-+                {"key": "site", "value": "1.1.1.1", "details": []},
-+                {"key": "arbitrator", "value": "2.2.2.2", "details": []},
-+            ],
-         )
-         env.booth.create_config.assert_called_once_with(
-             "config content",
-@@ -426,7 +425,7 @@ class PullConfigTest(TestCase):
-                 ),
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                     {
-                         "node": None,
-                         "name": "name",
-@@ -467,7 +466,7 @@ class PullConfigTest(TestCase):
-                 ),
-                 (
-                     Severities.INFO,
--                    report_codes.BOOTH_CONFIGS_SAVED_ON_NODE,
-+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-                     {
-                         "node": None,
-                         "name": "name",
-@@ -548,7 +547,8 @@ class CreateInClusterTest(TestCase):
-     def test_raises_when_is_created_already(self):
-         assert_raise_library_error(
-             lambda: commands.create_in_cluster(
--                mock.MagicMock(), "somename", ip="1.2.3.4", resource_create=None
-+                mock.MagicMock(), "somename", ip="1.2.3.4",
-+                resource_create=None, resource_remove=None,
-             ),
-             (
-                 Severities.ERROR,
-@@ -559,14 +559,14 @@ class CreateInClusterTest(TestCase):
-             ),
-         )
- 
--class RemoveFromClusterTest(TestCase):
--    @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock(
--        side_effect=booth_resource.BoothNotFoundInCib()
--    )))
-+class FindResourceElementsForOperationTest(TestCase):
-+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[]))
-     def test_raises_when_no_booth_resource_found(self):
-         assert_raise_library_error(
--            lambda: commands.remove_from_cluster(
--                mock.MagicMock(), "somename", resource_remove=None
-+            lambda: commands._find_resource_elements_for_operation(
-+                mock.MagicMock(),
-+                "somename",
-+                allow_multiple=False
-             ),
-             (
-                 Severities.ERROR,
-@@ -577,13 +577,15 @@ class RemoveFromClusterTest(TestCase):
-             ),
-         )
- 
--    @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock(
--        side_effect=booth_resource.BoothMultipleOccurenceFoundInCib()
--    )))
-+    @patch_commands(
-+        "resource.find_for_config", mock.Mock(return_value=["b_el1", "b_el2"])
-+    )
-     def test_raises_when_multiple_booth_resource_found(self):
-         assert_raise_library_error(
--            lambda: commands.remove_from_cluster(
--                mock.MagicMock(), "somename", resource_remove=None
-+            lambda: commands._find_resource_elements_for_operation(
-+                mock.MagicMock(),
-+                "somename",
-+                allow_multiple=False
-             ),
-             (
-                 Severities.ERROR,
-@@ -595,15 +597,15 @@ class RemoveFromClusterTest(TestCase):
-             ),
-         )
- 
--    @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock(
--        return_value=2
--    )))
-+    @patch_commands("get_resources", mock.Mock(return_value="resources"))
-+    @patch_commands("resource.get_remover", mock.MagicMock())
-+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[1, 2]))
-     def test_warn_when_multiple_booth_resources_removed(self):
-         report_processor=MockLibraryReportProcessor()
--        commands.remove_from_cluster(
-+        commands._find_resource_elements_for_operation(
-             mock.MagicMock(report_processor=report_processor),
-             "somename",
--            resource_remove=None
-+            allow_multiple=True,
-         )
-         assert_report_item_list_equal(report_processor.report_item_list, [(
-             Severities.WARNING,
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index b3c4877..270ad2d 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -590,8 +590,8 @@ Add new ticket to the current configuration.
- ticket remove <ticket>
- Remove the specified ticket from the current configuration.
- .TP
--config
--Show booth configuration.
-+config [<node>]
-+Show booth configuration from the specified node or from the current node if node not specified.
- .TP
- create ip <address>
- Make the cluster run booth service on the specified ip address as a cluster resource.  Typically this is used to run booth site.
-@@ -599,11 +599,14 @@ Make the cluster run booth service on the specified ip address as a cluster reso
- remove
- Remove booth resources created by the "pcs booth create" command.
- .TP
-+restart
-+Restart booth resources created by the "pcs booth create" command.
-+.TP
- ticket grant <ticket> [<site address>]
--Grant the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.
-+Grant the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator.
- .TP
- ticket revoke <ticket> [<site address>]
--Revoke the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.
-+Revoke the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator.
- .TP
- status
- Print current status of booth on the local node.
-diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py
-index 5ddc06d..3356e71 100644
---- a/pcs/test/test_booth.py
-+++ b/pcs/test/test_booth.py
-@@ -76,10 +76,10 @@ class SetupTest(BoothMixin, unittest.TestCase):
-         self.assert_pcs_success(
-             "booth config",
-             stdout_full=console_report(
-+                "authfile = {0}".format(BOOTH_KEY_FILE),
-                 "site = 1.1.1.1",
-                 "site = 2.2.2.2",
-                 "arbitrator = 3.3.3.3",
--                "authfile = {0}".format(BOOTH_KEY_FILE),
-             )
-         )
-         with open(BOOTH_KEY_FILE) as key_file:
-@@ -187,13 +187,14 @@ class BoothTest(unittest.TestCase, BoothMixin):
- 
- class AddTicketTest(BoothTest):
-     def test_success_add_ticket(self):
--        self.assert_pcs_success("booth ticket add TicketA")
-+        self.assert_pcs_success("booth ticket add TicketA expire=10")
-         self.assert_pcs_success("booth config", stdout_full=console_report(
-+            "authfile = {0}".format(BOOTH_KEY_FILE),
-             "site = 1.1.1.1",
-             "site = 2.2.2.2",
-             "arbitrator = 3.3.3.3",
--            "authfile = {0}".format(BOOTH_KEY_FILE),
-             'ticket = "TicketA"',
-+            "  expire = 10",
-         ))
- 
-     def test_fail_on_bad_ticket_name(self):
-@@ -211,22 +212,33 @@ class AddTicketTest(BoothTest):
-             "\n"
-         )
- 
-+    def test_fail_on_invalid_options(self):
-+        self.assert_pcs_fail(
-+            "booth ticket add TicketA site=a timeout=", console_report(
-+                "Error: invalid booth ticket option 'site', allowed options"
-+                    " are: acquire-after, attr-prereq, before-acquire-handler,"
-+                    " expire, renewal-freq, retries, timeout, weights"
-+                ,
-+                "Error: '' is not a valid timeout value, use no-empty",
-+            )
-+        )
-+
- class RemoveTicketTest(BoothTest):
-     def test_success_remove_ticket(self):
-         self.assert_pcs_success("booth ticket add TicketA")
-         self.assert_pcs_success("booth config", stdout_full=console_report(
-+            "authfile = {0}".format(BOOTH_KEY_FILE),
-             "site = 1.1.1.1",
-             "site = 2.2.2.2",
-             "arbitrator = 3.3.3.3",
--            "authfile = {0}".format(BOOTH_KEY_FILE),
-             'ticket = "TicketA"',
-         ))
-         self.assert_pcs_success("booth ticket remove TicketA")
-         self.assert_pcs_success("booth config", stdout_full=console_report(
-+            "authfile = {0}".format(BOOTH_KEY_FILE),
-             "site = 1.1.1.1",
-             "site = 2.2.2.2",
-             "arbitrator = 3.3.3.3",
--            "authfile = {0}".format(BOOTH_KEY_FILE),
-         ))
- 
-     def test_fail_when_ticket_does_not_exist(self):
-@@ -286,7 +298,6 @@ class RemoveTest(BoothTest):
-             " --force to override"
-         ])
- 
--
-     def test_remove_added_booth_configuration(self):
-         self.assert_pcs_success("resource show", "NO resources configured\n")
-         self.assert_pcs_success("booth create ip 192.168.122.120")
-@@ -301,8 +312,27 @@ class RemoveTest(BoothTest):
-         ])
-         self.assert_pcs_success("resource show", "NO resources configured\n")
- 
--    def test_fail_when_booth_is_not_currently_configured(self):
--        pass
-+
-+    def test_remove_multiple_booth_configuration(self):
-+        self.assert_pcs_success("resource show", "NO resources configured\n")
-+        self.assert_pcs_success("booth create ip 192.168.122.120")
-+        self.assert_pcs_success(
-+            "resource create some-id ocf:pacemaker:booth-site"
-+            " config=/etc/booth/booth.conf"
-+        )
-+        self.assert_pcs_success("resource show", [
-+             " Resource Group: booth-booth-group",
-+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
-+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
-+             " some-id	(ocf::pacemaker:booth-site):	Stopped",
-+        ])
-+        self.assert_pcs_success("booth remove --force", [
-+            "Warning: found more than one booth instance 'booth' in cib",
-+            "Deleting Resource - booth-booth-ip",
-+            "Deleting Resource (and group) - booth-booth-service",
-+            "Deleting Resource - some-id",
-+        ])
-+
- 
- class TicketGrantTest(BoothTest):
-     def test_failed_when_implicit_site_but_not_correct_confgiuration_in_cib(
-@@ -332,6 +362,7 @@ class ConfigTest(unittest.TestCase, BoothMixin):
-     def setUp(self):
-         shutil.copy(EMPTY_CIB, TEMP_CIB)
-         self.pcs_runner = PcsRunner(TEMP_CIB)
-+
-     def test_fail_when_config_file_do_not_exists(self):
-         ensure_booth_config_not_exists()
-         self.assert_pcs_fail(
-@@ -340,3 +371,33 @@ class ConfigTest(unittest.TestCase, BoothMixin):
-                 BOOTH_CONFIG_FILE
-             )
-         )
-+
-+    def test_too_much_args(self):
-+        self.assert_pcs_fail(
-+            "booth config nodename surplus",
-+            stdout_start="\nUsage: pcs booth <command>\n    config ["
-+        )
-+
-+    def test_show_unsupported_values(self):
-+        ensure_booth_config_not_exists()
-+        self.assert_pcs_success(
-+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
-+        )
-+        with open(BOOTH_CONFIG_FILE, "a") as config_file:
-+            config_file.write("some = nonsense")
-+        self.assert_pcs_success("booth ticket add TicketA")
-+        with open(BOOTH_CONFIG_FILE, "a") as config_file:
-+            config_file.write("another = nonsense")
-+
-+        self.assert_pcs_success(
-+            "booth config",
-+            stdout_full="\n".join((
-+                "authfile = {0}".format(BOOTH_KEY_FILE),
-+                "site = 1.1.1.1",
-+                "site = 2.2.2.2",
-+                "arbitrator = 3.3.3.3",
-+                "some = nonsense",
-+                'ticket = "TicketA"',
-+                "another = nonsense",
-+            ))
-+        )
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 78e340b..088dec9 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -118,6 +118,7 @@ def generate_completion_tree_from_usage():
-     tree["pcsd"] = generate_tree(pcsd([],False))
-     tree["node"] = generate_tree(node([], False))
-     tree["alert"] = generate_tree(alert([], False))
-+    tree["booth"] = generate_tree(booth([], False))
-     return tree
- 
- def generate_tree(usage_txt):
-@@ -1438,8 +1439,9 @@ Commands:
-     ticket remove <ticket>
-         Remove the specified ticket from the current configuration.
- 
--    config
--        Show booth configuration.
-+    config [<node>]
-+        Show booth configuration from the specified node or from the current
-+        node if node not specified.
- 
-     create ip <address>
-         Make the cluster run booth service on the specified ip address as
-@@ -1448,15 +1450,18 @@ Commands:
-     remove
-         Remove booth resources created by the "pcs booth create" command.
- 
-+    restart
-+        Restart booth resources created by the "pcs booth create" command.
-+
-     ticket grant <ticket> [<site address>]
-         Grant the ticket for the site specified by address.  Site address which
-         has been specified with 'pcs booth create' command is used if
--        'site address' is omitted.
-+        'site address' is omitted. Cannot be run on an arbitrator.
- 
-     ticket revoke <ticket> [<site address>]
-         Revoke the ticket for the site specified by address.  Site address which
-         has been specified with 'pcs booth create' command is used if
--        'site address' is omitted.
-+        'site address' is omitted. Cannot be run on an arbitrator.
- 
-     status
-         Print current status of booth on the local node.
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch b/SOURCES/bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch
deleted file mode 100644
index 95e102c..0000000
--- a/SOURCES/bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch
+++ /dev/null
@@ -1,397 +0,0 @@
-From 8707ba13053e172d148ec12820a4259ffa371000 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 14 Sep 2016 09:04:57 +0200
-Subject: [PATCH] squash bz1308514 Wider support for booth configura
-
-de5edd583d82 correct booth documentation (ticket grant/revoke)
-
-d98e6b04da8d make forceable uknown booth ticket option
----
- pcs/cli/booth/command.py                    |   6 +-
- pcs/cli/booth/test/test_command.py          |   3 +-
- pcs/lib/booth/config_structure.py           |  31 ++++++--
- pcs/lib/booth/test/test_config_structure.py | 107 ++++++++++++++++++++++++----
- pcs/lib/commands/booth.py                   |   7 +-
- pcs/pcs.8                                   |   7 +-
- pcs/test/test_booth.py                      |  17 +++++
- pcs/usage.py                                |  11 +--
- 8 files changed, 161 insertions(+), 28 deletions(-)
-
-diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py
-index 0b71a01..72b2c73 100644
---- a/pcs/cli/booth/command.py
-+++ b/pcs/cli/booth/command.py
-@@ -64,7 +64,11 @@ def config_ticket_add(lib, arg_list, modifiers):
-     """
-     if not arg_list:
-         raise CmdLineInputError
--    lib.booth.config_ticket_add(arg_list[0], prepare_options(arg_list[1:]))
-+    lib.booth.config_ticket_add(
-+        arg_list[0],
-+        prepare_options(arg_list[1:]),
-+        allow_unknown_options=modifiers["force"]
-+    )
- 
- def config_ticket_remove(lib, arg_list, modifiers):
-     """
-diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py
-index 44d7a12..8ba2c0e 100644
---- a/pcs/cli/booth/test/test_command.py
-+++ b/pcs/cli/booth/test/test_command.py
-@@ -45,9 +45,10 @@ class ConfigTicketAddTest(TestCase):
-         command.config_ticket_add(
-             lib,
-             arg_list=["TICKET_A", "timeout=10"],
--            modifiers={}
-+            modifiers={"force": True}
-         )
-         lib.booth.config_ticket_add.assert_called_once_with(
-             "TICKET_A",
-             {"timeout": "10"},
-+            allow_unknown_options=True
-         )
-diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py
-index 8977b7a..09ff1a7 100644
---- a/pcs/lib/booth/config_structure.py
-+++ b/pcs/lib/booth/config_structure.py
-@@ -9,7 +9,8 @@ import re
- 
- import pcs.lib.reports as common_reports
- from pcs.lib.booth import reports
--from pcs.lib.errors import LibraryError
-+from pcs.lib.errors import LibraryError, ReportItemSeverity as severities
-+from pcs.common import report_codes
- from collections import namedtuple
- 
- GLOBAL_KEYS = (
-@@ -83,10 +84,13 @@ def remove_ticket(booth_configuration, ticket_name):
-         if config_item.key != "ticket" or config_item.value != ticket_name
-     ]
- 
--def add_ticket(booth_configuration, ticket_name, options):
-+def add_ticket(
-+    report_processor, booth_configuration, ticket_name, options,
-+    allow_unknown_options
-+):
-     validate_ticket_name(ticket_name)
-     validate_ticket_unique(booth_configuration, ticket_name)
--    validate_ticket_options(options)
-+    validate_ticket_options(report_processor, options, allow_unknown_options)
-     return booth_configuration + [
-         ConfigItem("ticket", ticket_name, [
-             ConfigItem(key, value) for key, value in options.items()
-@@ -101,7 +105,7 @@ def validate_ticket_unique(booth_configuration, ticket_name):
-     if ticket_exists(booth_configuration, ticket_name):
-         raise LibraryError(reports.booth_ticket_duplicate(ticket_name))
- 
--def validate_ticket_options(options):
-+def validate_ticket_options(report_processor, options, allow_unknown_options):
-     reports = []
-     for key in sorted(options):
-         if key in GLOBAL_KEYS:
-@@ -109,6 +113,22 @@ def validate_ticket_options(options):
-                 common_reports.invalid_option(key, TICKET_KEYS, "booth ticket")
-             )
- 
-+        elif key not in TICKET_KEYS:
-+            reports.append(
-+                common_reports.invalid_option(
-+                    key, TICKET_KEYS,
-+                    "booth ticket",
-+                    severity=(
-+                        severities.WARNING if allow_unknown_options
-+                        else severities.ERROR
-+                    ),
-+                    forceable=(
-+                        None if allow_unknown_options
-+                        else report_codes.FORCE_OPTIONS
-+                    ),
-+                )
-+            )
-+
-         if not options[key].strip():
-             reports.append(common_reports.invalid_option_value(
-                 key,
-@@ -116,8 +136,7 @@ def validate_ticket_options(options):
-                 "no-empty",
-             ))
- 
--    if reports:
--        raise LibraryError(*reports)
-+    report_processor.process_list(reports)
- 
- def ticket_exists(booth_configuration, ticket_name):
-     return any(
-diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
-index 5e7ac68..40618b2 100644
---- a/pcs/lib/booth/test/test_config_structure.py
-+++ b/pcs/lib/booth/test/test_config_structure.py
-@@ -10,7 +10,11 @@ from pcs.test.tools.pcs_unittest import TestCase
- from pcs.common import report_codes
- from pcs.lib.booth import config_structure
- from pcs.lib.errors import ReportItemSeverity as severities
--from pcs.test.tools.assertions import assert_raise_library_error
-+from pcs.test.tools.assertions import (
-+    assert_raise_library_error,
-+    assert_report_item_list_equal,
-+)
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
- from pcs.test.tools.pcs_unittest import mock
- 
- 
-@@ -49,12 +53,8 @@ class ValidateTicketUniqueTest(TestCase):
- 
- class ValidateTicketOptionsTest(TestCase):
-     def test_raises_on_invalid_options(self):
--        assert_raise_library_error(
--            lambda: config_structure.validate_ticket_options({
--                "site": "a",
--                "port": "b",
--                "timeout": " ",
--            }),
-+        report_processor = MockLibraryReportProcessor()
-+        expected_errors = [
-             (
-                 severities.ERROR,
-                 report_codes.INVALID_OPTION,
-@@ -82,10 +82,81 @@ class ValidateTicketOptionsTest(TestCase):
-                     "allowed_values": "no-empty",
-                 },
-             ),
-+            (
-+                severities.ERROR,
-+                report_codes.INVALID_OPTION,
-+                {
-+                    "option_name": "unknown",
-+                    "option_type": "booth ticket",
-+                    "allowed": list(config_structure.TICKET_KEYS),
-+                },
-+                report_codes.FORCE_OPTIONS
-+            ),
-+        ]
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_ticket_options(
-+                report_processor,
-+                {
-+                    "site": "a",
-+                    "port": "b",
-+                    "timeout": " ",
-+                    "unknown": "c",
-+                },
-+                allow_unknown_options=False,
-+            ),
-+            *expected_errors
-+        )
-+        assert_report_item_list_equal(
-+            report_processor.report_item_list,
-+            expected_errors
-+        )
-+
-+    def test_unknown_options_are_forceable(self):
-+        report_processor = MockLibraryReportProcessor()
-+        expected_errors = [
-+            (
-+                severities.ERROR,
-+                report_codes.INVALID_OPTION,
-+                {
-+                    "option_name": "site",
-+                    "option_type": "booth ticket",
-+                    "allowed": list(config_structure.TICKET_KEYS),
-+                },
-+            ),
-+        ]
-+        assert_raise_library_error(
-+            lambda: config_structure.validate_ticket_options(
-+                report_processor, {
-+                    "site": "a",
-+                    "unknown": "c",
-+                },
-+                allow_unknown_options=True,
-+            ),
-+            *expected_errors
-+        )
-+        assert_report_item_list_equal(
-+            report_processor.report_item_list,
-+            expected_errors + [
-+                (
-+                    severities.WARNING,
-+                    report_codes.INVALID_OPTION,
-+                    {
-+                        "option_name": "unknown",
-+                        "option_type": "booth ticket",
-+                        "allowed": list(config_structure.TICKET_KEYS),
-+                    },
-+                ),
-+            ]
-         )
- 
-     def test_success_on_valid_options(self):
--        config_structure.validate_ticket_options({"timeout": "10"})
-+        report_processor = MockLibraryReportProcessor()
-+        config_structure.validate_ticket_options(
-+            report_processor,
-+            {"timeout": "10"},
-+            allow_unknown_options=False,
-+        )
-+        assert_report_item_list_equal(report_processor.report_item_list, [])
- 
- class TicketExistsTest(TestCase):
-     def test_returns_true_if_ticket_in_structure(self):
-@@ -214,18 +285,25 @@ class RemoveTicketTest(TestCase):
-         )
- 
- class AddTicketTest(TestCase):
-+    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_options")
-     @mock.patch("pcs.lib.booth.config_structure.validate_ticket_unique")
-     @mock.patch("pcs.lib.booth.config_structure.validate_ticket_name")
-     def test_successfully_add_ticket(
--        self, mock_validate_name, mock_validate_uniq
-+        self, mock_validate_name, mock_validate_uniq, mock_validate_options
-     ):
-         configuration = [
-             config_structure.ConfigItem("ticket", "some-ticket"),
-         ]
-+
-         self.assertEqual(
--            config_structure.add_ticket(configuration, "new-ticket", {
--                "timeout": "10",
--            }),
-+            config_structure.add_ticket(
-+                None, configuration,
-+                "new-ticket",
-+                {
-+                    "timeout": "10",
-+                },
-+                allow_unknown_options=False,
-+            ),
-             [
-                 config_structure.ConfigItem("ticket", "some-ticket"),
-                 config_structure.ConfigItem("ticket", "new-ticket", [
-@@ -236,6 +314,11 @@ class AddTicketTest(TestCase):
- 
-         mock_validate_name.assert_called_once_with("new-ticket")
-         mock_validate_uniq.assert_called_once_with(configuration, "new-ticket")
-+        mock_validate_options.assert_called_once_with(
-+            None,
-+            {"timeout": "10"},
-+            False
-+        )
- 
- class SetAuthfileTest(TestCase):
-     def test_add_authfile(self):
-diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py
-index bea966c..705900a 100644
---- a/pcs/lib/commands/booth.py
-+++ b/pcs/lib/commands/booth.py
-@@ -119,14 +119,19 @@ def config_text(env, name, node_name=None):
-         raise LibraryError(reports.invalid_response_format(node_name))
- 
- 
--def config_ticket_add(env, ticket_name, options):
-+def config_ticket_add(env, ticket_name, options, allow_unknown_options):
-     """
-     add ticket to booth configuration
-+    dict options contains options for ticket
-+    bool allow_unknown_options decide if can be used options not listed in
-+        ticket options nor global options
-     """
-     booth_configuration = config_structure.add_ticket(
-+        env.report_processor,
-         parse(env.booth.get_config_content()),
-         ticket_name,
-         options,
-+        allow_unknown_options,
-     )
-     env.booth.push_config(build(booth_configuration))
- 
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 270ad2d..61abe67 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -585,7 +585,8 @@ destroy
- Remove booth configuration files.
- .TP
- ticket add <ticket>
--Add new ticket to the current configuration.
-+Add new ticket to the current configuration. Ticket options are specified in booth manpage.
-+
- .TP
- ticket remove <ticket>
- Remove the specified ticket from the current configuration.
-@@ -603,10 +604,10 @@ restart
- Restart booth resources created by the "pcs booth create" command.
- .TP
- ticket grant <ticket> [<site address>]
--Grant the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator.
-+Grant the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.  Specifying site address is mandatory when running this command on an arbitrator.
- .TP
- ticket revoke <ticket> [<site address>]
--Revoke the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator.
-+Revoke the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.  Specifying site address is mandatory when running this command on an arbitrator.
- .TP
- status
- Print current status of booth on the local node.
-diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py
-index 3356e71..c12391b 100644
---- a/pcs/test/test_booth.py
-+++ b/pcs/test/test_booth.py
-@@ -223,6 +223,23 @@ class AddTicketTest(BoothTest):
-             )
-         )
- 
-+    def test_forceable_fail_on_unknown_options(self):
-+        msg = (
-+            "invalid booth ticket option 'unknown', allowed options"
-+            " are: acquire-after, attr-prereq, before-acquire-handler,"
-+            " expire, renewal-freq, retries, timeout, weights"
-+        )
-+        self.assert_pcs_fail(
-+            "booth ticket add TicketA unknown=a", console_report(
-+                "Error: "+msg+", use --force to override",
-+            )
-+        )
-+        self.assert_pcs_success(
-+            "booth ticket add TicketA unknown=a --force",
-+            "Warning: {0}\n".format(msg),
-+        )
-+
-+
- class RemoveTicketTest(BoothTest):
-     def test_success_remove_ticket(self):
-         self.assert_pcs_success("booth ticket add TicketA")
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 088dec9..9d4617f 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1433,8 +1433,9 @@ Commands:
-     destroy
-         Remove booth configuration files.
- 
--    ticket add <ticket>
--        Add new ticket to the current configuration.
-+    ticket add <ticket> [<name>=<value> ...]
-+        Add new ticket to the current configuration. Ticket options are
-+        specified in booth manpage.
- 
-     ticket remove <ticket>
-         Remove the specified ticket from the current configuration.
-@@ -1456,12 +1457,14 @@ Commands:
-     ticket grant <ticket> [<site address>]
-         Grant the ticket for the site specified by address.  Site address which
-         has been specified with 'pcs booth create' command is used if
--        'site address' is omitted. Cannot be run on an arbitrator.
-+        'site address' is omitted.  Specifying site address is mandatory when
-+        running this command on an arbitrator.
- 
-     ticket revoke <ticket> [<site address>]
-         Revoke the ticket for the site specified by address.  Site address which
-         has been specified with 'pcs booth create' command is used if
--        'site address' is omitted. Cannot be run on an arbitrator.
-+        'site address' is omitted.  Specifying site address is mandatory when
-+        running this command on an arbitrator.
- 
-     status
-         Print current status of booth on the local node.
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1315371-01-add-support-for-pacemaker-alerts.patch b/SOURCES/bz1315371-01-add-support-for-pacemaker-alerts.patch
deleted file mode 100644
index 969a63b..0000000
--- a/SOURCES/bz1315371-01-add-support-for-pacemaker-alerts.patch
+++ /dev/null
@@ -1,4195 +0,0 @@
-From ae514b04a95cadb3ac1819a9097dbee694f4596b Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Tue, 21 Jun 2016 15:23:07 +0200
-Subject: [PATCH] bz1315371-01-add support for pacemaker alerts
-
----
- .pylintrc                            |   2 +-
- pcs/alert.py                         | 237 +++++++++
- pcs/app.py                           |   6 +
- pcs/cli/common/env.py                |   1 +
- pcs/cli/common/lib_wrapper.py        |  28 +-
- pcs/cli/common/middleware.py         |   2 +-
- pcs/common/report_codes.py           |   6 +
- pcs/config.py                        |   4 +
- pcs/lib/cib/alert.py                 | 281 +++++++++++
- pcs/lib/cib/nvpair.py                |  90 ++++
- pcs/lib/cib/test/test_alert.py       | 931 +++++++++++++++++++++++++++++++++++
- pcs/lib/cib/test/test_nvpair.py      | 206 ++++++++
- pcs/lib/cib/tools.py                 | 127 +++++
- pcs/lib/commands/alert.py            | 169 +++++++
- pcs/lib/commands/test/test_alert.py  | 639 ++++++++++++++++++++++++
- pcs/lib/commands/test/test_ticket.py |   2 +-
- pcs/lib/env.py                       |  32 +-
- pcs/lib/pacemaker.py                 |  17 +-
- pcs/lib/reports.py                   |  91 ++++
- pcs/pcs.8                            |  25 +
- pcs/test/resources/cib-empty-2.5.xml |  10 +
- pcs/test/test_alert.py               | 363 ++++++++++++++
- pcs/test/test_lib_cib_tools.py       | 181 ++++++-
- pcs/test/test_lib_env.py             | 140 +++++-
- pcs/test/test_lib_pacemaker.py       |  24 +-
- pcs/test/test_resource.py            |   6 +
- pcs/test/test_stonith.py             |   3 +
- pcs/test/tools/color_text_runner.py  |  10 +
- pcs/usage.py                         |  43 ++
- pcs/utils.py                         |   9 +-
- 30 files changed, 3649 insertions(+), 36 deletions(-)
- create mode 100644 pcs/alert.py
- create mode 100644 pcs/lib/cib/alert.py
- create mode 100644 pcs/lib/cib/nvpair.py
- create mode 100644 pcs/lib/cib/test/test_alert.py
- create mode 100644 pcs/lib/cib/test/test_nvpair.py
- create mode 100644 pcs/lib/commands/alert.py
- create mode 100644 pcs/lib/commands/test/test_alert.py
- create mode 100644 pcs/test/resources/cib-empty-2.5.xml
- create mode 100644 pcs/test/test_alert.py
-
-diff --git a/.pylintrc b/.pylintrc
-index 661f3d2..e378e6a 100644
---- a/.pylintrc
-+++ b/.pylintrc
-@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy
- 
- [FORMAT]
- # Maximum number of lines in a module
--max-module-lines=4571
-+max-module-lines=4577
- # Maximum number of characters on a single line.
- max-line-length=1291
- 
-diff --git a/pcs/alert.py b/pcs/alert.py
-new file mode 100644
-index 0000000..d3a6e28
---- /dev/null
-+++ b/pcs/alert.py
-@@ -0,0 +1,237 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import sys
-+
-+from pcs import (
-+    usage,
-+    utils,
-+)
-+from pcs.cli.common.errors import CmdLineInputError
-+from pcs.cli.common.parse_args import prepare_options
-+from pcs.cli.common.console_report import indent
-+from pcs.lib.errors import LibraryError
-+
-+
-+def alert_cmd(*args):
-+    argv = args[1]
-+    if not argv:
-+        sub_cmd = "config"
-+    else:
-+        sub_cmd = argv.pop(0)
-+    try:
-+        if sub_cmd == "help":
-+            usage.alert(argv)
-+        elif sub_cmd == "create":
-+            alert_add(*args)
-+        elif sub_cmd == "update":
-+            alert_update(*args)
-+        elif sub_cmd == "remove":
-+            alert_remove(*args)
-+        elif sub_cmd == "config" or sub_cmd == "show":
-+            print_alert_config(*args)
-+        elif sub_cmd == "recipient":
-+            recipient_cmd(*args)
-+        else:
-+            raise CmdLineInputError()
-+    except LibraryError as e:
-+        utils.process_library_reports(e.args)
-+    except CmdLineInputError as e:
-+        utils.exit_on_cmdline_input_errror(e, "alert", sub_cmd)
-+
-+
-+def recipient_cmd(*args):
-+    argv = args[1]
-+
-+    if not argv:
-+        usage.alert(["recipient"])
-+        sys.exit(1)
-+
-+    sub_cmd = argv.pop(0)
-+    try:
-+        if sub_cmd == "help":
-+            usage.alert(["recipient"])
-+        elif sub_cmd == "add":
-+            recipient_add(*args)
-+        elif sub_cmd == "update":
-+            recipient_update(*args)
-+        elif sub_cmd == "remove":
-+            recipient_remove(*args)
-+    except CmdLineInputError as e:
-+        utils.exit_on_cmdline_input_errror(
-+            e, "alert", "recipient {0}".format(sub_cmd)
-+        )
-+
-+
-+def parse_cmd_sections(arg_list, section_list):
-+    output = dict([(section, []) for section in section_list + ["main"]])
-+    cur_section = "main"
-+    for arg in arg_list:
-+        if arg in section_list:
-+            cur_section = arg
-+            continue
-+        output[cur_section].append(arg)
-+
-+    return output
-+
-+
-+def ensure_only_allowed_options(parameter_dict, allowed_list):
-+    for arg, value in parameter_dict.items():
-+        if arg not in allowed_list:
-+            raise CmdLineInputError(
-+                "Unexpected parameter '{0}={1}'".format(arg, value)
-+            )
-+
-+
-+def alert_add(lib, argv, modifiers):
-+    if not argv:
-+        raise CmdLineInputError()
-+
-+    sections = parse_cmd_sections(argv, ["options", "meta"])
-+    main_args = prepare_options(sections["main"])
-+    ensure_only_allowed_options(main_args, ["id", "description", "path"])
-+
-+    lib.alert.create_alert(
-+        main_args.get("id", None),
-+        main_args.get("path", None),
-+        prepare_options(sections["options"]),
-+        prepare_options(sections["meta"]),
-+        main_args.get("description", None)
-+    )
-+
-+
-+def alert_update(lib, argv, modifiers):
-+    if not argv:
-+        raise CmdLineInputError()
-+
-+    alert_id = argv[0]
-+
-+    sections = parse_cmd_sections(argv[1:], ["options", "meta"])
-+    main_args = prepare_options(sections["main"])
-+    ensure_only_allowed_options(main_args, ["description", "path"])
-+
-+    lib.alert.update_alert(
-+        alert_id,
-+        main_args.get("path", None),
-+        prepare_options(sections["options"]),
-+        prepare_options(sections["meta"]),
-+        main_args.get("description", None)
-+    )
-+
-+
-+def alert_remove(lib, argv, modifiers):
-+    if len(argv) != 1:
-+        raise CmdLineInputError()
-+
-+    lib.alert.remove_alert(argv[0])
-+
-+
-+def recipient_add(lib, argv, modifiers):
-+    if len(argv) < 2:
-+        raise CmdLineInputError()
-+
-+    alert_id = argv[0]
-+    recipient_value = argv[1]
-+
-+    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
-+    main_args = prepare_options(sections["main"])
-+    ensure_only_allowed_options(main_args, ["description"])
-+
-+    lib.alert.add_recipient(
-+        alert_id,
-+        recipient_value,
-+        prepare_options(sections["options"]),
-+        prepare_options(sections["meta"]),
-+        main_args.get("description", None)
-+    )
-+
-+
-+def recipient_update(lib, argv, modifiers):
-+    if len(argv) < 2:
-+        raise CmdLineInputError()
-+
-+    alert_id = argv[0]
-+    recipient_value = argv[1]
-+
-+    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
-+    main_args = prepare_options(sections["main"])
-+    ensure_only_allowed_options(main_args, ["description"])
-+
-+    lib.alert.update_recipient(
-+        alert_id,
-+        recipient_value,
-+        prepare_options(sections["options"]),
-+        prepare_options(sections["meta"]),
-+        main_args.get("description", None)
-+    )
-+
-+
-+def recipient_remove(lib, argv, modifiers):
-+    if len(argv) != 2:
-+        raise CmdLineInputError()
-+
-+    lib.alert.remove_recipient(argv[0], argv[1])
-+
-+
-+def _nvset_to_str(nvset_obj):
-+    output = []
-+    for nvpair_obj in nvset_obj:
-+        output.append("{key}={value}".format(
-+            key=nvpair_obj["name"], value=nvpair_obj["value"]
-+        ))
-+    return " ".join(output)
-+
-+
-+def __description_attributes_to_str(obj):
-+    output = []
-+    if obj.get("description"):
-+        output.append("Description: {desc}".format(desc=obj["description"]))
-+    if obj.get("instance_attributes"):
-+        output.append("Options: {attributes}".format(
-+            attributes=_nvset_to_str(obj["instance_attributes"])
-+        ))
-+    if obj.get("meta_attributes"):
-+        output.append("Meta options: {attributes}".format(
-+            attributes=_nvset_to_str(obj["meta_attributes"])
-+        ))
-+    return output
-+
-+
-+def _alert_to_str(alert):
-+    content = []
-+    content.extend(__description_attributes_to_str(alert))
-+
-+    recipients = []
-+    for recipient in alert.get("recipient_list", []):
-+        recipients.extend( _recipient_to_str(recipient))
-+
-+    if recipients:
-+        content.append("Recipients:")
-+        content.extend(indent(recipients, 1))
-+
-+    return ["Alert: {alert_id} (path={path})".format(
-+        alert_id=alert["id"], path=alert["path"]
-+    )] + indent(content, 1)
-+
-+
-+def _recipient_to_str(recipient):
-+    return ["Recipient: {value}".format(value=recipient["value"])] + indent(
-+        __description_attributes_to_str(recipient), 1
-+    )
-+
-+
-+def print_alert_config(lib, argv, modifiers):
-+    if argv:
-+        raise CmdLineInputError()
-+
-+    print("Alerts:")
-+    alert_list = lib.alert.get_all_alerts()
-+    if alert_list:
-+        for alert in alert_list:
-+            print("\n".join(indent(_alert_to_str(alert), 1)))
-+    else:
-+        print(" No alerts defined")
-diff --git a/pcs/app.py b/pcs/app.py
-index 3c4865f..3758ee4 100644
---- a/pcs/app.py
-+++ b/pcs/app.py
-@@ -27,6 +27,7 @@ from pcs import (
-     stonith,
-     usage,
-     utils,
-+    alert,
- )
- 
- from pcs.cli.common import completion
-@@ -193,6 +194,11 @@ def main(argv=None):
-             argv,
-             utils.get_modificators()
-         ),
-+        "alert": lambda args: alert.alert_cmd(
-+            utils.get_library_wrapper(),
-+            args,
-+            utils.get_modificators()
-+        ),
-     }
-     if command not in cmd_map:
-         usage.main()
-diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py
-index f407981..2ba4f70 100644
---- a/pcs/cli/common/env.py
-+++ b/pcs/cli/common/env.py
-@@ -8,6 +8,7 @@ from __future__ import (
- class Env(object):
-     def __init__(self):
-         self.cib_data = None
-+        self.cib_upgraded = False
-         self.user = None
-         self.groups = None
-         self.corosync_conf_data = None
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index 909b435..2ba5602 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -19,6 +19,7 @@ from pcs.lib.commands import (
-     quorum,
-     qdevice,
-     sbd,
-+    alert,
- )
- from pcs.cli.common.reports import (
-     LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
-@@ -42,6 +43,14 @@ def cli_env_to_lib_env(cli_env):
-         cli_env.auth_tokens_getter,
-     )
- 
-+def lib_env_to_cli_env(lib_env, cli_env):
-+    if not lib_env.is_cib_live:
-+        cli_env.cib_data = lib_env._get_cib_xml()
-+        cli_env.cib_upgraded = lib_env.cib_upgraded
-+    if not lib_env.is_corosync_conf_live:
-+        cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
-+    return cli_env
-+
- def bind(cli_env, run_with_middleware, run_library_command):
-     def run(cli_env, *args, **kwargs):
-         lib_env = cli_env_to_lib_env(cli_env)
-@@ -50,10 +59,7 @@ def bind(cli_env, run_with_middleware, run_library_command):
- 
-         #midlewares needs finish its work and they see only cli_env
-         #so we need reflect some changes to cli_env
--        if not lib_env.is_cib_live:
--            cli_env.cib_data = lib_env.get_cib_xml()
--        if not lib_env.is_corosync_conf_live:
--            cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
-+        lib_env_to_cli_env(lib_env, cli_env)
- 
-         return lib_call_result
-     return partial(run_with_middleware, run, cli_env)
-@@ -140,6 +146,20 @@ def load_module(env, middleware_factory, name):
-                 "get_local_sbd_config": sbd.get_local_sbd_config,
-             }
-         )
-+    if name == "alert":
-+        return bind_all(
-+            env,
-+            middleware.build(middleware_factory.cib),
-+            {
-+                "create_alert": alert.create_alert,
-+                "update_alert": alert.update_alert,
-+                "remove_alert": alert.remove_alert,
-+                "add_recipient": alert.add_recipient,
-+                "update_recipient": alert.update_recipient,
-+                "remove_recipient": alert.remove_recipient,
-+                "get_all_alerts": alert.get_all_alerts,
-+            }
-+        )
- 
-     raise Exception("No library part '{0}'".format(name))
- 
-diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py
-index 16618e1..e53e138 100644
---- a/pcs/cli/common/middleware.py
-+++ b/pcs/cli/common/middleware.py
-@@ -34,7 +34,7 @@ def cib(use_local_cib, load_cib_content, write_cib):
-         result_of_next = next_in_line(env, *args, **kwargs)
- 
-         if use_local_cib:
--            write_cib(env.cib_data)
-+            write_cib(env.cib_data, env.cib_upgraded)
- 
-         return result_of_next
-     return apply
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index 927df35..bda982a 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -20,11 +20,17 @@ SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
- AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR"
- AGENT_NOT_FOUND = "AGENT_NOT_FOUND"
- BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
-+CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
-+CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
-+CIB_ALERT_RECIPIENT_NOT_FOUND = "CIB_ALERT_RECIPIENT_NOT_FOUND"
- CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
- CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
- CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
- CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING"
- CIB_PUSH_ERROR = "CIB_PUSH_ERROR"
-+CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED"
-+CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION"
-+CIB_UPGRADE_SUCCESSFUL = "CIB_UPGRADE_SUCCESSFUL"
- CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES"
- CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS'
- CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
-diff --git a/pcs/config.py b/pcs/config.py
-index 51de822..4659c5b 100644
---- a/pcs/config.py
-+++ b/pcs/config.py
-@@ -38,6 +38,7 @@ from pcs import (
-     stonith,
-     usage,
-     utils,
-+    alert,
- )
- from pcs.lib.errors import LibraryError
- from pcs.lib.commands import quorum as lib_quorum
-@@ -123,6 +124,9 @@ def config_show_cib():
-     ticket_command.show(lib, [], modificators)
- 
-     print()
-+    alert.print_alert_config(lib, [], modificators)
-+
-+    print()
-     del utils.pcs_options["--all"]
-     print("Resources Defaults:")
-     resource.show_defaults("rsc_defaults", indent=" ")
-diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py
-new file mode 100644
-index 0000000..6b72996
---- /dev/null
-+++ b/pcs/lib/cib/alert.py
-@@ -0,0 +1,281 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from lxml import etree
-+
-+from pcs.lib import reports
-+from pcs.lib.errors import LibraryError
-+from pcs.lib.cib.nvpair import update_nvset, get_nvset
-+from pcs.lib.cib.tools import (
-+    check_new_id_applicable,
-+    get_sub_element,
-+    find_unique_id,
-+    get_alerts,
-+)
-+
-+
-+def update_instance_attributes(tree, element, attribute_dict):
-+    """
-+    Updates instance attributes of element. Returns updated instance
-+    attributes element.
-+
-+    tree -- cib etree node
-+    element -- parent element of instance attributes
-+    attribute_dict -- dictionary of nvpairs
-+    """
-+    return update_nvset("instance_attributes", tree, element, attribute_dict)
-+
-+
-+def update_meta_attributes(tree, element, attribute_dict):
-+    """
-+    Updates meta attributes of element. Returns updated meta attributes element.
-+
-+    tree -- cib etree node
-+    element -- parent element of meta attributes
-+    attribute_dict -- dictionary of nvpairs
-+    """
-+    return update_nvset("meta_attributes", tree, element, attribute_dict)
-+
-+
-+def _update_optional_attribute(element, attribute, value):
-+    """
-+    Update optional attribute of element. Remove existing element if value
-+    is empty.
-+
-+    element -- parent element of specified attribute
-+    attribute -- attribute to be updated
-+    value -- new value
-+    """
-+    if value is None:
-+        return
-+    if value:
-+        element.set(attribute, value)
-+    elif attribute in element.attrib:
-+        del element.attrib[attribute]
-+
-+
-+def get_alert_by_id(tree, alert_id):
-+    """
-+    Returns alert element with specified id.
-+    Raises AlertNotFound if alert with specified id doesn't exist.
-+
-+    tree -- cib etree node
-+    alert_id -- id of alert
-+    """
-+    alert = get_alerts(tree).find("./alert[@id='{0}']".format(alert_id))
-+    if alert is None:
-+        raise LibraryError(reports.cib_alert_not_found(alert_id))
-+    return alert
-+
-+
-+def get_recipient(alert, recipient_value):
-+    """
-+    Returns recipient element with value recipient_value which belong to
-+    specified alert.
-+    Raises RecipientNotFound if recipient doesn't exist.
-+
-+    alert -- parent element of required recipient
-+    recipient_value -- value of recipient
-+    """
-+    recipient = alert.find(
-+        "./recipient[@value='{0}']".format(recipient_value)
-+    )
-+    if recipient is None:
-+        raise LibraryError(reports.cib_alert_recipient_not_found(
-+            alert.get("id"), recipient_value
-+        ))
-+    return recipient
-+
-+
-+def create_alert(tree, alert_id, path, description=""):
-+    """
-+    Create new alert element. Returns newly created element.
-+    Raises LibraryError if element with specified id already exists.
-+
-+    tree -- cib etree node
-+    alert_id -- id of new alert, it will be generated if it is None
-+    path -- path to script
-+    description -- description
-+    """
-+    if alert_id:
-+        check_new_id_applicable(tree, "alert-id", alert_id)
-+    else:
-+        alert_id = find_unique_id(tree, "alert")
-+
-+    alert = etree.SubElement(get_alerts(tree), "alert", id=alert_id, path=path)
-+    if description:
-+        alert.set("description", description)
-+
-+    return alert
-+
-+
-+def update_alert(tree, alert_id, path, description=None):
-+    """
-+    Update existing alert. Return updated alert element.
-+    Raises AlertNotFound if alert with specified id doesn't exist.
-+
-+    tree -- cib etree node
-+    alert_id -- id of alert to be updated
-+    path -- new value of path, stay unchanged if None
-+    description -- new value of description, stay unchanged if None, remove
-+        if empty
-+    """
-+    alert = get_alert_by_id(tree, alert_id)
-+    if path:
-+        alert.set("path", path)
-+    _update_optional_attribute(alert, "description", description)
-+    return alert
-+
-+
-+def remove_alert(tree, alert_id):
-+    """
-+    Remove alert with specified id.
-+    Raises AlertNotFound if alert with specified id doesn't exist.
-+
-+    tree -- cib etree node
-+    alert_id -- id of alert which should be removed
-+    """
-+    alert = get_alert_by_id(tree, alert_id)
-+    alert.getparent().remove(alert)
-+
-+
-+def add_recipient(
-+    tree,
-+    alert_id,
-+    recipient_value,
-+    description=""
-+):
-+    """
-+    Add recipient to alert with specified id. Returns added recipient element.
-+    Raises AlertNotFound if alert with specified id doesn't exist.
-+    Raises LibraryError if recipient already exists.
-+
-+    tree -- cib etree node
-+    alert_id -- id of alert which should be parent of new recipient
-+    recipient_value -- value of recipient
-+    description -- description of recipient
-+    """
-+    alert = get_alert_by_id(tree, alert_id)
-+
-+    recipient = alert.find(
-+        "./recipient[@value='{0}']".format(recipient_value)
-+    )
-+    if recipient is not None:
-+        raise LibraryError(reports.cib_alert_recipient_already_exists(
-+            alert_id, recipient_value
-+        ))
-+
-+    recipient = etree.SubElement(
-+        alert,
-+        "recipient",
-+        id=find_unique_id(tree, "{0}-recipient".format(alert_id)),
-+        value=recipient_value
-+    )
-+
-+    if description:
-+        recipient.set("description", description)
-+
-+    return recipient
-+
-+
-+def update_recipient(tree, alert_id, recipient_value, description):
-+    """
-+    Update specified recipient. Returns updated recipient element.
-+    Raises AlertNotFound if alert with specified id doesn't exist.
-+    Raises RecipientNotFound if recipient doesn't exist.
-+
-+    tree -- cib etree node
-+    alert_id -- id of alert, parent element of recipient
-+    recipient_value -- recipient value
-+    description -- description, if empty it will be removed, stay unchanged
-+        if None
-+    """
-+    recipient = get_recipient(
-+        get_alert_by_id(tree, alert_id), recipient_value
-+    )
-+    _update_optional_attribute(recipient, "description", description)
-+    return recipient
-+
-+
-+def remove_recipient(tree, alert_id, recipient_value):
-+    """
-+    Remove specified recipient.
-+    Raises AlertNotFound if alert with specified id doesn't exist.
-+    Raises RecipientNotFound if recipient doesn't exist.
-+
-+    tree -- cib etree node
-+    alert_id -- id of alert, parent element of recipient
-+    recipient_value -- recipient value
-+    """
-+    recipient = get_recipient(
-+        get_alert_by_id(tree, alert_id), recipient_value
-+    )
-+    recipient.getparent().remove(recipient)
-+
-+
-+def get_all_recipients(alert):
-+    """
-+    Returns list of all recipient of specified alert. Format:
-+    [
-+        {
-+            "id": <id of recipient>,
-+            "value": <value of recipient>,
-+            "description": <recipient description>,
-+            "instance_attributes": <list of nvpairs>,
-+            "meta_attributes": <list of nvpairs>
-+        }
-+    ]
-+
-+    alert -- parent element of recipients to return
-+    """
-+    recipient_list = []
-+    for recipient in alert.findall("./recipient"):
-+        recipient_list.append({
-+            "id": recipient.get("id"),
-+            "value": recipient.get("value"),
-+            "description": recipient.get("description", ""),
-+            "instance_attributes": get_nvset(
-+                get_sub_element(recipient, "instance_attributes")
-+            ),
-+            "meta_attributes": get_nvset(
-+                get_sub_element(recipient, "meta_attributes")
-+            )
-+        })
-+    return recipient_list
-+
-+
-+def get_all_alerts(tree):
-+    """
-+    Returns list of all alerts specified in tree. Format:
-+    [
-+        {
-+            "id": <id of alert>,
-+            "path": <path to script>,
-+            "description": <alert description>,
-+            "instance_attributes": <list of nvpairs>,
-+            "meta_attributes": <list of nvpairs>,
-+            "recipients_list": <list of alert's recipients>
-+        }
-+    ]
-+
-+    tree -- cib etree node
-+    """
-+    alert_list = []
-+    for alert in get_alerts(tree).findall("./alert"):
-+        alert_list.append({
-+            "id": alert.get("id"),
-+            "path": alert.get("path"),
-+            "description": alert.get("description", ""),
-+            "instance_attributes": get_nvset(
-+                get_sub_element(alert, "instance_attributes")
-+            ),
-+            "meta_attributes": get_nvset(
-+                get_sub_element(alert, "meta_attributes")
-+            ),
-+            "recipient_list": get_all_recipients(alert)
-+        })
-+    return alert_list
-diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
-new file mode 100644
-index 0000000..d1a0cae
---- /dev/null
-+++ b/pcs/lib/cib/nvpair.py
-@@ -0,0 +1,90 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from lxml import etree
-+
-+from pcs.lib.cib.tools import (
-+    get_sub_element,
-+    find_unique_id,
-+)
-+
-+
-+def update_nvpair(tree, element, name, value):
-+    """
-+    Update nvpair, create new if it doesn't yet exist or remove existing
-+    nvpair if value is empty. Returns created/updated/removed nvpair element.
-+
-+    tree -- cib etree node
-+    element -- element in which nvpair should be added/updated/removed
-+    name -- name of nvpair
-+    value -- value of nvpair
-+    """
-+    nvpair = element.find("./nvpair[@name='{0}']".format(name))
-+    if nvpair is None:
-+        if not value:
-+            return None
-+        nvpair_id = find_unique_id(
-+            tree, "{0}-{1}".format(element.get("id"), name)
-+        )
-+        nvpair = etree.SubElement(
-+            element, "nvpair", id=nvpair_id, name=name, value=value
-+        )
-+    else:
-+        if value:
-+            nvpair.set("value", value)
-+        else:
-+            # remove nvpair if value is empty
-+            element.remove(nvpair)
-+    return nvpair
-+
-+
-+def update_nvset(tag_name, tree, element, attribute_dict):
-+    """
-+    This method updates nvset specified by tag_name. If specified nvset
-+    doesn't exist it will be created. Returns updated nvset element or None if
-+    attribute_dict is empty.
-+
-+    tag_name -- tag name of nvset element
-+    tree -- cib etree node
-+    element -- parent element of nvset
-+    attribute_dict -- dictionary of nvpairs
-+    """
-+    if not attribute_dict:
-+        return None
-+
-+    attributes = get_sub_element(element, tag_name, find_unique_id(
-+        tree, "{0}-{1}".format(element.get("id"), tag_name)
-+    ), 0)
-+
-+    for name, value in sorted(attribute_dict.items()):
-+        update_nvpair(tree, attributes, name, value)
-+
-+    return attributes
-+
-+
-+def get_nvset(nvset):
-+    """
-+    Returns nvset element as list of nvpairs with format:
-+    [
-+        {
-+            "id": <id of nvpair>,
-+            "name": <name of nvpair>,
-+            "value": <value of nvpair>
-+        },
-+        ...
-+    ]
-+
-+    nvset -- nvset element
-+    """
-+    nvpair_list = []
-+    for nvpair in nvset.findall("./nvpair"):
-+        nvpair_list.append({
-+            "id": nvpair.get("id"),
-+            "name": nvpair.get("name"),
-+            "value": nvpair.get("value", "")
-+        })
-+    return nvpair_list
-diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
-new file mode 100644
-index 0000000..c387aaf
---- /dev/null
-+++ b/pcs/lib/cib/test/test_alert.py
-@@ -0,0 +1,931 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from lxml import etree
-+
-+from pcs.common import report_codes
-+from pcs.lib.cib import alert
-+from pcs.lib.errors import ReportItemSeverity as severities
-+from pcs.test.tools.assertions import(
-+    assert_raise_library_error,
-+    assert_xml_equal,
-+)
-+from pcs.test.tools.pcs_mock import mock
-+
-+
-+@mock.patch("pcs.lib.cib.alert.update_nvset")
-+class UpdateInstanceAttributesTest(TestCase):
-+    def test_success(self, mock_update_nvset):
-+        ret_val = etree.Element("nvset")
-+        tree = etree.Element("tree")
-+        element = etree.Element("element")
-+        attributes = {"a": 1}
-+        mock_update_nvset.return_value = ret_val
-+        self.assertEqual(
-+            alert.update_instance_attributes(tree, element, attributes),
-+            ret_val
-+        )
-+        mock_update_nvset.assert_called_once_with(
-+            "instance_attributes", tree, element, attributes
-+        )
-+
-+
-+@mock.patch("pcs.lib.cib.alert.update_nvset")
-+class UpdateMetaAttributesTest(TestCase):
-+    def test_success(self, mock_update_nvset):
-+        ret_val = etree.Element("nvset")
-+        tree = etree.Element("tree")
-+        element = etree.Element("element")
-+        attributes = {"a": 1}
-+        mock_update_nvset.return_value = ret_val
-+        self.assertEqual(
-+            alert.update_meta_attributes(tree, element, attributes),
-+            ret_val
-+        )
-+        mock_update_nvset.assert_called_once_with(
-+            "meta_attributes", tree, element, attributes
-+        )
-+
-+
-+class UpdateOptionalAttributeTest(TestCase):
-+    def test_add(self):
-+        element = etree.Element("element")
-+        alert._update_optional_attribute(element, "attr", "value1")
-+        self.assertEqual(element.get("attr"), "value1")
-+
-+    def test_update(self):
-+        element = etree.Element("element", attr="value")
-+        alert._update_optional_attribute(element, "attr", "value1")
-+        self.assertEqual(element.get("attr"), "value1")
-+
-+    def test_remove(self):
-+        element = etree.Element("element", attr="value")
-+        alert._update_optional_attribute(element, "attr", "")
-+        self.assertTrue(element.get("attr") is None)
-+
-+
-+class GetAlertByIdTest(TestCase):
-+    def test_found(self):
-+        xml = """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert-1"/>
-+                        <alert id="alert-2"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+        """
-+        assert_xml_equal(
-+            '<alert id="alert-2"/>',
-+            etree.tostring(
-+                alert.get_alert_by_id(etree.XML(xml), "alert-2")
-+            ).decode()
-+        )
-+
-+    def test_different_place(self):
-+        xml = """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert-1"/>
-+                    </alerts>
-+                    <alert id="alert-2"/>
-+                </configuration>
-+            </cib>
-+        """
-+        assert_raise_library_error(
-+            lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "alert-2"}
-+            )
-+        )
-+
-+    def test_not_exist(self):
-+        xml = """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert-1"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+        """
-+        assert_raise_library_error(
-+            lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "alert-2"}
-+            )
-+        )
-+
-+
-+class GetRecipientTest(TestCase):
-+    def setUp(self):
-+        self.xml = etree.XML(
-+            """
-+                <alert id="alert-1">
-+                    <recipient id="rec-1" value="value1"/>
-+                    <recipient id="rec-2" value="value2"/>
-+                    <not_recipient value="value3"/>
-+                    <recipients>
-+                        <recipient id="rec-4" value="value4"/>
-+                    </recipients>
-+                </alert>
-+            """
-+        )
-+
-+    def test_exist(self):
-+        assert_xml_equal(
-+            '<recipient id="rec-2" value="value2"/>',
-+            etree.tostring(alert.get_recipient(self.xml, "value2")).decode()
-+        )
-+
-+    def test_different_place(self):
-+        assert_raise_library_error(
-+            lambda: alert.get_recipient(self.xml, "value4"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                {
-+                    "alert": "alert-1",
-+                    "recipient": "value4"
-+                }
-+            )
-+        )
-+
-+    def test_not_recipient(self):
-+        assert_raise_library_error(
-+            lambda: alert.get_recipient(self.xml, "value3"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                {
-+                    "alert": "alert-1",
-+                    "recipient": "value3"
-+                }
-+            )
-+        )
-+
-+
-+class CreateAlertTest(TestCase):
-+    def setUp(self):
-+        self.tree = etree.XML(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+
-+    def test_no_alerts(self):
-+        tree = etree.XML(
-+            """
-+            <cib>
-+                <configuration/>
-+            </cib>
-+            """
-+        )
-+        assert_xml_equal(
-+            '<alert id="my-alert" path="/test/path"/>',
-+            etree.tostring(
-+                alert.create_alert(tree, "my-alert", "/test/path")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="my-alert" path="/test/path"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(tree).decode()
-+        )
-+
-+    def test_alerts_exists(self):
-+        assert_xml_equal(
-+            '<alert id="my-alert" path="/test/path"/>',
-+            etree.tostring(
-+                alert.create_alert(self.tree, "my-alert", "/test/path")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert"/>
-+                        <alert id="my-alert" path="/test/path"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_alerts_exists_with_description(self):
-+        assert_xml_equal(
-+            '<alert id="my-alert" path="/test/path" description="nothing"/>',
-+            etree.tostring(alert.create_alert(
-+                self.tree, "my-alert", "/test/path", "nothing"
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert"/>
-+                        <alert
-+                            id="my-alert"
-+                            path="/test/path"
-+                            description="nothing"
-+                        />
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_invalid_id(self):
-+        assert_raise_library_error(
-+            lambda: alert.create_alert(self.tree, "1alert", "/path"),
-+            (
-+                severities.ERROR,
-+                report_codes.INVALID_ID,
-+                {
-+                    "id": "1alert",
-+                    "id_description": "alert-id",
-+                    "invalid_character": "1",
-+                    "reason": "invalid first character"
-+                }
-+            )
-+        )
-+
-+    def test_id_exists(self):
-+        assert_raise_library_error(
-+            lambda: alert.create_alert(self.tree, "alert", "/path"),
-+            (
-+                severities.ERROR,
-+                report_codes.ID_ALREADY_EXISTS,
-+                {"id": "alert"}
-+            )
-+        )
-+
-+    def test_no_id(self):
-+        assert_xml_equal(
-+            '<alert id="alert-1" path="/test/path"/>',
-+            etree.tostring(
-+                alert.create_alert(self.tree, None, "/test/path")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert"/>
-+                        <alert id="alert-1" path="/test/path"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+
-+class UpdateAlertTest(TestCase):
-+    def setUp(self):
-+        self.tree = etree.XML(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path"/>
-+                        <alert id="alert1" path="/path1" description="nothing"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+
-+    def test_update_path(self):
-+        assert_xml_equal(
-+            '<alert id="alert" path="/test/path"/>',
-+            etree.tostring(
-+                alert.update_alert(self.tree, "alert", "/test/path")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/test/path"/>
-+                        <alert id="alert1" path="/path1" description="nothing"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_remove_path(self):
-+        assert_xml_equal(
-+            '<alert id="alert" path="/path"/>',
-+            etree.tostring(alert.update_alert(self.tree, "alert", "")).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path"/>
-+                        <alert id="alert1" path="/path1" description="nothing"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_update_description(self):
-+        assert_xml_equal(
-+            '<alert id="alert" path="/path" description="desc"/>',
-+            etree.tostring(
-+                alert.update_alert(self.tree, "alert", None, "desc")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path" description="desc"/>
-+                        <alert id="alert1" path="/path1" description="nothing"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_remove_description(self):
-+        assert_xml_equal(
-+            '<alert id="alert1" path="/path1"/>',
-+            etree.tostring(
-+                alert.update_alert(self.tree, "alert1", None, "")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path"/>
-+                        <alert id="alert1" path="/path1"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_id_not_exists(self):
-+        assert_raise_library_error(
-+            lambda: alert.update_alert(self.tree, "alert0", "/test"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "alert0"}
-+            )
-+        )
-+
-+
-+class RemoveAlertTest(TestCase):
-+    def setUp(self):
-+        self.tree = etree.XML(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path"/>
-+                        <alert id="alert-1" path="/next"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+
-+    def test_success(self):
-+        alert.remove_alert(self.tree, "alert")
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert-1" path="/next"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_not_existing_id(self):
-+        assert_raise_library_error(
-+            lambda: alert.remove_alert(self.tree, "not-existing-id"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "not-existing-id"}
-+            )
-+        )
-+
-+
-+class AddRecipientTest(TestCase):
-+    def setUp(self):
-+        self.tree = etree.XML(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+
-+    def test_success(self):
-+        assert_xml_equal(
-+            '<recipient id="alert-recipient-1" value="value1"/>',
-+            etree.tostring(
-+                alert.add_recipient(self.tree, "alert", "value1")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient id="alert-recipient-1" value="value1"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_recipient_exist(self):
-+        assert_raise_library_error(
-+            lambda: alert.add_recipient(self.tree, "alert", "test_val"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+                {
-+                    "recipient": "test_val",
-+                    "alert": "alert"
-+                }
-+            )
-+        )
-+
-+    def test_alert_not_exist(self):
-+        assert_raise_library_error(
-+            lambda: alert.add_recipient(self.tree, "alert1", "test_val"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "alert1"}
-+            )
-+        )
-+
-+    def test_with_description(self):
-+        assert_xml_equal(
-+            """
-+            <recipient
-+                id="alert-recipient-1"
-+                value="value1"
-+                description="desc"
-+            />
-+            """,
-+            etree.tostring(alert.add_recipient(
-+                self.tree, "alert", "value1", "desc"
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="desc"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+
-+class UpdateRecipientTest(TestCase):
-+    def setUp(self):
-+        self.tree = etree.XML(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="desc"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+
-+    def test_add_description(self):
-+        assert_xml_equal(
-+            """
-+            <recipient
-+                id="alert-recipient" value="test_val" description="description"
-+            />
-+            """,
-+            etree.tostring(alert.update_recipient(
-+                self.tree, "alert", "test_val", "description"
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient
-+                                id="alert-recipient"
-+                                value="test_val"
-+                                description="description"
-+                            />
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="desc"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_update_description(self):
-+        assert_xml_equal(
-+            """
-+            <recipient
-+                id="alert-recipient-1" value="value1" description="description"
-+            />
-+            """,
-+            etree.tostring(alert.update_recipient(
-+                self.tree, "alert", "value1", "description"
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="description"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_remove_description(self):
-+        assert_xml_equal(
-+            """
-+                <recipient id="alert-recipient-1" value="value1"/>
-+            """,
-+            etree.tostring(
-+               alert.update_recipient(self.tree, "alert", "value1", "")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient id="alert-recipient-1" value="value1"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_alert_not_exists(self):
-+        assert_raise_library_error(
-+            lambda: alert.update_recipient(self.tree, "alert1", "test_val", ""),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "alert1"}
-+            )
-+        )
-+
-+    def test_recipient_not_exists(self):
-+        assert_raise_library_error(
-+            lambda: alert.update_recipient(self.tree, "alert", "unknown", ""),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                {
-+                    "alert": "alert",
-+                    "recipient": "unknown"
-+                }
-+            )
-+        )
-+
-+
-+class RemoveRecipientTest(TestCase):
-+    def setUp(self):
-+        self.tree = etree.XML(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient id="alert-recipient-2" value="val"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+
-+    def test_success(self):
-+        alert.remove_recipient(self.tree, "alert", "val")
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+
-+    def test_alert_not_exists(self):
-+        assert_raise_library_error(
-+            lambda: alert.remove_recipient(self.tree, "alert1", "test_val"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "alert1"}
-+            )
-+        )
-+
-+    def test_recipient_not_exists(self):
-+        assert_raise_library_error(
-+            lambda: alert.remove_recipient(self.tree, "alert", "unknown"),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                {
-+                    "alert": "alert",
-+                    "recipient": "unknown"
-+                }
-+            )
-+        )
-+
-+
-+class GetAllRecipientsTest(TestCase):
-+    def test_success(self):
-+        alert_obj = etree.XML(
-+            """
-+            <alert id="alert" path="/path">
-+                <recipient id="alert-recipient" value="test_val">
-+                    <instance_attributes>
-+                        <nvpair
-+                            id="nvset-name1-value1" name="name1" value="value1"
-+                        />
-+                        <nvpair
-+                            id="nvset-name2-value2" name="name2" value="value2"
-+                        />
-+                    </instance_attributes>
-+                    <meta_attributes>
-+                        <nvpair id="nvset-name3" name="name3"/>
-+                    </meta_attributes>
-+                </recipient>
-+                <recipient
-+                    id="alert-recipient-1" value="value1" description="desc"
-+                />
-+            </alert>
-+            """
-+        )
-+        self.assertEqual(
-+            [
-+                {
-+                    "id": "alert-recipient",
-+                    "value": "test_val",
-+                    "description": "",
-+                    "instance_attributes": [
-+                        {
-+                            "id": "nvset-name1-value1",
-+                            "name": "name1",
-+                            "value": "value1"
-+                        },
-+                        {
-+                            "id": "nvset-name2-value2",
-+                            "name": "name2",
-+                            "value": "value2"
-+                        }
-+                    ],
-+                    "meta_attributes": [
-+                        {
-+                            "id": "nvset-name3",
-+                            "name": "name3",
-+                            "value": ""
-+                        }
-+                    ]
-+                },
-+                {
-+                    "id": "alert-recipient-1",
-+                    "value": "value1",
-+                    "description": "desc",
-+                    "instance_attributes": [],
-+                    "meta_attributes": []
-+                }
-+            ],
-+            alert.get_all_recipients(alert_obj)
-+        )
-+
-+
-+class GetAllAlertsTest(TestCase):
-+    def test_success(self):
-+        alerts = etree.XML(
-+            """
-+<cib>
-+    <configuration>
-+        <alerts>
-+            <alert id="alert" path="/path">
-+                <recipient id="alert-recipient" value="test_val">
-+                    <instance_attributes>
-+                        <nvpair
-+                            id="instance_attributes-name1-value1"
-+                            name="name1"
-+                            value="value1"
-+                        />
-+                        <nvpair
-+                            id="instance_attributes-name2-value2"
-+                            name="name2"
-+                            value="value2"
-+                        />
-+                    </instance_attributes>
-+                    <meta_attributes>
-+                        <nvpair id="meta_attributes-name3" name="name3"/>
-+                    </meta_attributes>
-+                </recipient>
-+                <recipient
-+                    id="alert-recipient-1" value="value1" description="desc"
-+                />
-+            </alert>
-+            <alert id="alert1" path="/test/path" description="desc">
-+                <instance_attributes>
-+                    <nvpair
-+                        id="alert1-name1-value1" name="name1" value="value1"
-+                    />
-+                    <nvpair
-+                        id="alert1-name2-value2" name="name2" value="value2"
-+                    />
-+                </instance_attributes>
-+                <meta_attributes>
-+                    <nvpair id="alert1-name3" name="name3"/>
-+                </meta_attributes>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """
-+        )
-+        self.assertEqual(
-+            [
-+                {
-+                    "id": "alert",
-+                    "path": "/path",
-+                    "description": "",
-+                    "instance_attributes": [],
-+                    "meta_attributes": [],
-+                    "recipient_list": [
-+                        {
-+                            "id": "alert-recipient",
-+                            "value": "test_val",
-+                            "description": "",
-+                            "instance_attributes": [
-+                                {
-+                                    "id": "instance_attributes-name1-value1",
-+                                    "name": "name1",
-+                                    "value": "value1"
-+                                },
-+                                {
-+                                    "id": "instance_attributes-name2-value2",
-+                                    "name": "name2",
-+                                    "value": "value2"
-+                                }
-+                            ],
-+                            "meta_attributes": [
-+                                {
-+                                    "id": "meta_attributes-name3",
-+                                    "name": "name3",
-+                                    "value": ""
-+                                }
-+                            ]
-+                        },
-+                        {
-+                            "id": "alert-recipient-1",
-+                            "value": "value1",
-+                            "description": "desc",
-+                            "instance_attributes": [],
-+                            "meta_attributes": []
-+                        }
-+                    ]
-+                },
-+                {
-+                    "id": "alert1",
-+                    "path": "/test/path",
-+                    "description": "desc",
-+                    "instance_attributes": [
-+                        {
-+                            "id": "alert1-name1-value1",
-+                            "name": "name1",
-+                            "value": "value1"
-+                        },
-+                        {
-+                            "id": "alert1-name2-value2",
-+                            "name": "name2",
-+                            "value": "value2"
-+                        }
-+                    ],
-+                    "meta_attributes": [
-+                        {
-+                            "id": "alert1-name3",
-+                            "name": "name3",
-+                            "value": ""
-+                        }
-+                    ],
-+                    "recipient_list": []
-+                }
-+            ],
-+            alert.get_all_alerts(alerts)
-+        )
-diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
-new file mode 100644
-index 0000000..6907f25
---- /dev/null
-+++ b/pcs/lib/cib/test/test_nvpair.py
-@@ -0,0 +1,206 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from unittest import TestCase
-+
-+from lxml import etree
-+
-+from pcs.lib.cib import nvpair
-+from pcs.test.tools.assertions import assert_xml_equal
-+
-+
-+class UpdateNvpairTest(TestCase):
-+    def setUp(self):
-+        self.nvset = etree.Element("nvset", id="nvset")
-+        etree.SubElement(
-+            self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
-+        )
-+        etree.SubElement(
-+            self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
-+        )
-+        etree.SubElement(
-+            self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
-+        )
-+
-+    def test_update(self):
-+        assert_xml_equal(
-+            "<nvpair id='nvset-attr' name='attr' value='10'/>",
-+            etree.tostring(
-+                nvpair.update_nvpair(self.nvset, self.nvset, "attr", "10")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <nvset id="nvset">
-+                <nvpair id="nvset-attr" name="attr" value="10"/>
-+                <nvpair id="nvset-attr2" name="attr2" value="2"/>
-+                <notnvpair id="nvset-test" name="test" value="0"/>
-+            </nvset>
-+            """,
-+            etree.tostring(self.nvset).decode()
-+        )
-+
-+    def test_add(self):
-+        assert_xml_equal(
-+            "<nvpair id='nvset-test-1' name='test' value='0'/>",
-+            etree.tostring(
-+                nvpair.update_nvpair(self.nvset, self.nvset, "test", "0")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <nvset id="nvset">
-+                <nvpair id="nvset-attr" name="attr" value="1"/>
-+                <nvpair id="nvset-attr2" name="attr2" value="2"/>
-+                <notnvpair id="nvset-test" name="test" value="0"/>
-+                <nvpair id="nvset-test-1" name="test" value="0"/>
-+            </nvset>
-+            """,
-+            etree.tostring(self.nvset).decode()
-+        )
-+
-+    def test_remove(self):
-+        assert_xml_equal(
-+            "<nvpair id='nvset-attr2' name='attr2' value='2'/>",
-+            etree.tostring(
-+                nvpair.update_nvpair(self.nvset, self.nvset, "attr2", "")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <nvset id="nvset">
-+                <nvpair id="nvset-attr" name="attr" value="1"/>
-+                <notnvpair id="nvset-test" name="test" value="0"/>
-+            </nvset>
-+            """,
-+            etree.tostring(self.nvset).decode()
-+        )
-+
-+    def test_remove_not_existing(self):
-+        self.assertTrue(
-+            nvpair.update_nvpair(self.nvset, self.nvset, "attr3", "") is None
-+        )
-+        assert_xml_equal(
-+            """
-+            <nvset id="nvset">
-+                <nvpair id="nvset-attr" name="attr" value="1"/>
-+                <nvpair id="nvset-attr2" name="attr2" value="2"/>
-+                <notnvpair id="nvset-test" name="test" value="0"/>
-+            </nvset>
-+            """,
-+            etree.tostring(self.nvset).decode()
-+        )
-+
-+
-+class UpdateNvsetTest(TestCase):
-+    def setUp(self):
-+        self.root = etree.Element("root", id="root")
-+        self.nvset = etree.SubElement(self.root, "nvset", id="nvset")
-+        etree.SubElement(
-+            self.nvset, "nvpair", id="nvset-attr", name="attr", value="1"
-+        )
-+        etree.SubElement(
-+            self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2"
-+        )
-+        etree.SubElement(
-+            self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
-+        )
-+
-+    def test_None(self):
-+        self.assertTrue(
-+            nvpair.update_nvset("nvset", self.root, self.root, None) is None
-+        )
-+
-+    def test_empty(self):
-+        self.assertTrue(
-+            nvpair.update_nvset("nvset", self.root, self.root, {}) is None
-+        )
-+
-+    def test_existing(self):
-+        self.assertEqual(
-+            self.nvset,
-+            nvpair.update_nvset("nvset", self.root, self.root, {
-+                "attr": "10",
-+                "new_one": "20",
-+                "test": "0",
-+                "attr2": ""
-+            })
-+        )
-+        assert_xml_equal(
-+            """
-+            <nvset id="nvset">
-+                <nvpair id="nvset-attr" name="attr" value="10"/>
-+                <notnvpair id="nvset-test" name="test" value="0"/>
-+                <nvpair id="nvset-new_one" name="new_one" value="20"/>
-+                <nvpair id="nvset-test-1" name="test" value="0"/>
-+            </nvset>
-+            """,
-+            etree.tostring(self.nvset).decode()
-+        )
-+
-+    def test_new(self):
-+        root = etree.Element("root", id="root")
-+        assert_xml_equal(
-+            """
-+            <nvset id="root-nvset">
-+                <nvpair id="root-nvset-attr" name="attr" value="10"/>
-+                <nvpair id="root-nvset-new_one" name="new_one" value="20"/>
-+                <nvpair id="root-nvset-test" name="test" value="0"/>
-+            </nvset>
-+            """,
-+            etree.tostring(nvpair.update_nvset("nvset", root, root, {
-+                "attr": "10",
-+                "new_one": "20",
-+                "test": "0",
-+                "attr2": ""
-+            })).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <root id="root">
-+                <nvset id="root-nvset">
-+                    <nvpair id="root-nvset-attr" name="attr" value="10"/>
-+                    <nvpair id="root-nvset-new_one" name="new_one" value="20"/>
-+                    <nvpair id="root-nvset-test" name="test" value="0"/>
-+                </nvset>
-+            </root>
-+            """,
-+            etree.tostring(root).decode()
-+        )
-+
-+
-+class GetNvsetTest(TestCase):
-+    def test_success(self):
-+        nvset = etree.XML(
-+            """
-+            <nvset>
-+                <nvpair id="nvset-name1" name="name1" value="value1"/>
-+                <nvpair id="nvset-name2" name="name2" value="value2"/>
-+                <nvpair id="nvset-name3" name="name3"/>
-+            </nvset>
-+            """
-+        )
-+        self.assertEqual(
-+            [
-+                {
-+                    "id": "nvset-name1",
-+                    "name": "name1",
-+                    "value": "value1"
-+                },
-+                {
-+                    "id": "nvset-name2",
-+                    "name": "name2",
-+                    "value": "value2"
-+                },
-+                {
-+                    "id": "nvset-name3",
-+                    "name": "name3",
-+                    "value": ""
-+                }
-+            ],
-+            nvpair.get_nvset(nvset)
-+        )
-diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
-index dfe31fc..b59d50d 100644
---- a/pcs/lib/cib/tools.py
-+++ b/pcs/lib/cib/tools.py
-@@ -5,8 +5,12 @@ from __future__ import (
-     unicode_literals,
- )
- 
-+import os
-+import re
-+import tempfile
- from lxml import etree
- 
-+from pcs import settings
- from pcs.lib import reports
- from pcs.lib.errors import LibraryError
- from pcs.lib.pacemaker_values import validate_id
-@@ -71,6 +75,15 @@ def get_acls(tree):
-         acls = etree.SubElement(get_configuration(tree), "acls")
-     return acls
- 
-+
-+def get_alerts(tree):
-+    """
-+    Return 'alerts' element from tree, create a new one if missing
-+    tree -- cib etree node
-+    """
-+    return get_sub_element(get_configuration(tree), "alerts")
-+
-+
- def get_constraints(tree):
-     """
-     Return 'constraint' element from tree
-@@ -87,3 +100,117 @@ def find_parent(element, tag_names):
- 
- def export_attributes(element):
-     return  dict((key, value) for key, value in element.attrib.items())
-+
-+
-+def get_sub_element(element, sub_element_tag, new_id=None, new_index=None):
-+    """
-+    Returns sub-element sub_element_tag of element. It will create new
-+    element if such doesn't exist yet. Id of new element will be new_if if
-+    it's not None. new_index specify where will be new element added, if None
-+    it will be appended.
-+
-+    element -- parent element
-+    sub_element_tag -- tag of wanted element
-+    new_id -- id of new element
-+    new_index -- index for new element
-+    """
-+    sub_element = element.find("./{0}".format(sub_element_tag))
-+    if sub_element is None:
-+        sub_element = etree.Element(sub_element_tag)
-+        if new_id:
-+            sub_element.set("id", new_id)
-+        if new_index is None:
-+            element.append(sub_element)
-+        else:
-+            element.insert(new_index, sub_element)
-+    return sub_element
-+
-+
-+def get_pacemaker_version_by_which_cib_was_validated(cib):
-+    """
-+    Return version of pacemaker which validated specified cib as tree.
-+    Version is returned as tuple of integers: (<major>, <minor>, <revision>).
-+    Raises LibraryError on any failure.
-+
-+    cib -- cib etree
-+    """
-+    version = cib.get("validate-with")
-+    if version is None:
-+        raise LibraryError(reports.cib_load_error_invalid_format())
-+
-+    regexp = re.compile(
-+        r"pacemaker-(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
-+    )
-+    match = regexp.match(version)
-+    if not match:
-+        raise LibraryError(reports.cib_load_error_invalid_format())
-+    return (
-+        int(match.group("major")),
-+        int(match.group("minor")),
-+        int(match.group("rev") or 0)
-+    )
-+
-+
-+def upgrade_cib(cib, runner):
-+    """
-+    Upgrade CIB to the latest schema of installed pacemaker. Returns upgraded
-+    CIB as string.
-+    Raises LibraryError on any failure.
-+
-+    cib -- cib etree
-+    runner -- CommandRunner
-+    """
-+    temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
-+    temp_file.write(etree.tostring(cib).decode())
-+    temp_file.flush()
-+    output, retval = runner.run(
-+        [
-+            os.path.join(settings.pacemaker_binaries, "cibadmin"),
-+            "--upgrade",
-+            "--force"
-+        ],
-+        env_extend={"CIB_file": temp_file.name}
-+    )
-+
-+    if retval != 0:
-+        temp_file.close()
-+        LibraryError(reports.cib_upgrade_failed(output))
-+
-+    try:
-+        temp_file.seek(0)
-+        return etree.fromstring(temp_file.read())
-+    except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
-+        LibraryError(reports.cib_upgrade_failed(str(e)))
-+    finally:
-+        temp_file.close()
-+
-+
-+def ensure_cib_version(runner, cib, version):
-+    """
-+    This method ensures that specified cib is verified by pacemaker with
-+    version 'version' or newer. If cib doesn't correspond to this version,
-+    method will try to upgrade cib.
-+    Returns cib which was verified by pacemaker version 'version' or later.
-+    Raises LibraryError on any failure.
-+
-+    runner -- CommandRunner
-+    cib -- cib tree
-+    version -- tuple of integers (<major>, <minor>, <revision>)
-+    """
-+    current_version = get_pacemaker_version_by_which_cib_was_validated(
-+        cib
-+    )
-+    if current_version >= version:
-+        return None
-+
-+    upgraded_cib = upgrade_cib(cib, runner)
-+    current_version = get_pacemaker_version_by_which_cib_was_validated(
-+        upgraded_cib
-+    )
-+
-+    if current_version >= version:
-+        return upgraded_cib
-+
-+    raise LibraryError(reports.unable_to_upgrade_cib_to_required_version(
-+        current_version, version
-+    ))
-diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
-new file mode 100644
-index 0000000..7371fbc
---- /dev/null
-+++ b/pcs/lib/commands/alert.py
-@@ -0,0 +1,169 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+from pcs.lib import reports
-+from pcs.lib.cib import alert
-+from pcs.lib.errors import LibraryError
-+
-+
-+REQUIRED_CIB_VERSION = (2, 5, 0)
-+
-+
-+def create_alert(
-+    lib_env,
-+    alert_id,
-+    path,
-+    instance_attribute_dict,
-+    meta_attribute_dict,
-+    description=None
-+):
-+    """
-+    Create new alert.
-+    Raises LibraryError if path is not specified, or any other failure.
-+
-+    lib_env -- LibraryEnvironment
-+    alert_id -- id of alert to be created, if None it will be generated
-+    path -- path to script for alert
-+    instance_attribute_dict -- dictionary of instance attributes
-+    meta_attribute_dict -- dictionary of meta attributes
-+    description -- alert description description
-+    """
-+    if not path:
-+        raise LibraryError(reports.required_option_is_missing("path"))
-+
-+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-+
-+    alert_el = alert.create_alert(cib, alert_id, path, description)
-+    alert.update_instance_attributes(cib, alert_el, instance_attribute_dict)
-+    alert.update_meta_attributes(cib, alert_el, meta_attribute_dict)
-+
-+    lib_env.push_cib(cib)
-+
-+
-+def update_alert(
-+    lib_env,
-+    alert_id,
-+    path,
-+    instance_attribute_dict,
-+    meta_attribute_dict,
-+    description=None
-+):
-+    """
-+    Update existing alert with specified id.
-+
-+    lib_env -- LibraryEnvironment
-+    alert_id -- id of alert to be updated
-+    path -- new path, if None old value will stay unchanged
-+    instance_attribute_dict -- dictionary of instance attributes to update
-+    meta_attribute_dict -- dictionary of meta attributes to update
-+    description -- new description, if empty string, old description will be
-+        deleted, if None old value will stay unchanged
-+    """
-+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-+
-+    alert_el = alert.update_alert(cib, alert_id, path, description)
-+    alert.update_instance_attributes(cib, alert_el, instance_attribute_dict)
-+    alert.update_meta_attributes(cib, alert_el, meta_attribute_dict)
-+
-+    lib_env.push_cib(cib)
-+
-+
-+def remove_alert(lib_env, alert_id):
-+    """
-+    Remove alert with specified id.
-+
-+    lib_env -- LibraryEnvironment
-+    alert_id -- id of alert which should be removed
-+    """
-+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-+    alert.remove_alert(cib, alert_id)
-+    lib_env.push_cib(cib)
-+
-+
-+def add_recipient(
-+    lib_env,
-+    alert_id,
-+    recipient_value,
-+    instance_attribute_dict,
-+    meta_attribute_dict,
-+    description=None
-+):
-+    """
-+    Add new recipient to alert witch id alert_id.
-+
-+    lib_env -- LibraryEnvironment
-+    alert_id -- id of alert to which new recipient should be added
-+    recipient_value -- value of new recipient
-+    instance_attribute_dict -- dictionary of instance attributes to update
-+    meta_attribute_dict -- dictionary of meta attributes to update
-+    description -- recipient description
-+    """
-+    if not recipient_value:
-+        raise LibraryError(
-+            reports.required_option_is_missing("value")
-+        )
-+
-+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-+    recipient = alert.add_recipient(
-+        cib, alert_id, recipient_value, description
-+    )
-+    alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
-+    alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
-+
-+    lib_env.push_cib(cib)
-+
-+
-+def update_recipient(
-+    lib_env,
-+    alert_id,
-+    recipient_value,
-+    instance_attribute_dict,
-+    meta_attribute_dict,
-+    description=None
-+):
-+    """
-+    Update existing recipient.
-+
-+    lib_env -- LibraryEnvironment
-+    alert_id -- id of alert to which recipient belong
-+    recipient_value -- recipient to be updated
-+    instance_attribute_dict -- dictionary of instance attributes to update
-+    meta_attribute_dict -- dictionary of meta attributes to update
-+    description -- new description, if empty string, old description will be
-+        deleted, if None old value will stay unchanged
-+    """
-+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-+    recipient = alert.update_recipient(
-+        cib, alert_id, recipient_value, description
-+    )
-+    alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
-+    alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
-+
-+    lib_env.push_cib(cib)
-+
-+
-+def remove_recipient(lib_env, alert_id, recipient_value):
-+    """
-+    Remove existing recipient.
-+
-+    lib_env -- LibraryEnvironment
-+    alert_id -- id of alert to which recipient belong
-+    recipient_value -- recipient to be removed
-+    """
-+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-+    alert.remove_recipient(cib, alert_id, recipient_value)
-+    lib_env.push_cib(cib)
-+
-+
-+def get_all_alerts(lib_env):
-+    """
-+    Returns list of all alerts. See docs of pcs.lib.cib.alert.get_all_alerts for
-+    description of data format.
-+
-+    lib_env -- LibraryEnvironment
-+    """
-+    return alert.get_all_alerts(lib_env.get_cib())
-diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
-new file mode 100644
-index 0000000..34813df
---- /dev/null
-+++ b/pcs/lib/commands/test/test_alert.py
-@@ -0,0 +1,639 @@
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import logging
-+from lxml import etree
-+
-+from unittest import TestCase
-+
-+from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.assertions import (
-+    assert_raise_library_error,
-+    assert_xml_equal,
-+)
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-+
-+from pcs.common import report_codes
-+from pcs.lib.errors import ReportItemSeverity as Severities
-+from pcs.lib.env import LibraryEnvironment
-+from pcs.lib.external import CommandRunner
-+
-+import pcs.lib.commands.alert as cmd_alert
-+
-+
-+@mock.patch("pcs.lib.cib.tools.upgrade_cib")
-+class CreateAlertTest(TestCase):
-+    def setUp(self):
-+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_env = LibraryEnvironment(
-+            self.mock_log, self.mock_rep, cib_data="<cib/>"
-+        )
-+
-+    def test_no_path(self, mock_upgrade_cib):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.create_alert(
-+                self.mock_env, None, None, None, None
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.REQUIRED_OPTION_IS_MISSING,
-+                {"option_name": "path"}
-+            )
-+        )
-+        self.assertEqual(0, mock_upgrade_cib.call_count)
-+
-+    def test_upgrade_needed(self, mock_upgrade_cib):
-+        self.mock_env._push_cib_xml(
-+            """
-+            <cib validate-with="pacemaker-2.4.1">
-+                <configuration>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+        mock_upgrade_cib.return_value = etree.XML(
-+            """
-+            <cib validate-with="pacemaker-2.5.0">
-+                <configuration>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+        cmd_alert.create_alert(
-+            self.mock_env,
-+            "my-alert",
-+            "/my/path",
-+            {
-+                "instance": "value",
-+                "another": "val"
-+            },
-+            {"meta1": "val1"},
-+            "my description"
-+        )
-+        assert_xml_equal(
-+            """
-+<cib validate-with="pacemaker-2.5.0">
-+    <configuration>
-+        <alerts>
-+            <alert id="my-alert" path="/my/path" description="my description">
-+                <meta_attributes id="my-alert-meta_attributes">
-+                    <nvpair
-+                        id="my-alert-meta_attributes-meta1"
-+                        name="meta1"
-+                        value="val1"
-+                    />
-+                </meta_attributes>
-+                <instance_attributes id="my-alert-instance_attributes">
-+                    <nvpair
-+                        id="my-alert-instance_attributes-another"
-+                        name="another"
-+                        value="val"
-+                    />
-+                    <nvpair
-+                        id="my-alert-instance_attributes-instance"
-+                        name="instance"
-+                        value="value"
-+                    />
-+                </instance_attributes>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+        self.assertEqual(1, mock_upgrade_cib.call_count)
-+
-+
-+class UpdateAlertTest(TestCase):
-+    def setUp(self):
-+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_env = LibraryEnvironment(
-+            self.mock_log, self.mock_rep, cib_data="<cib/>"
-+        )
-+
-+    def test_update_all(self):
-+        self.mock_env._push_cib_xml(
-+            """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="my-alert" path="/my/path" description="my description">
-+                <instance_attributes id="my-alert-instance_attributes">
-+                    <nvpair
-+                        id="my-alert-instance_attributes-instance"
-+                        name="instance"
-+                        value="value"
-+                    />
-+                    <nvpair
-+                        id="my-alert-instance_attributes-another"
-+                        name="another"
-+                        value="val"
-+                    />
-+                </instance_attributes>
-+                <meta_attributes id="my-alert-meta_attributes">
-+                    <nvpair
-+                        id="my-alert-meta_attributes-meta1"
-+                        name="meta1"
-+                        value="val1"
-+                    />
-+                </meta_attributes>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """
-+        )
-+        cmd_alert.update_alert(
-+            self.mock_env,
-+            "my-alert",
-+            "/another/one",
-+            {
-+                "instance": "",
-+                "my-attr": "its_val"
-+            },
-+            {"meta1": "val2"},
-+            ""
-+        )
-+        assert_xml_equal(
-+            """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="my-alert" path="/another/one">
-+                <instance_attributes id="my-alert-instance_attributes">
-+                    <nvpair
-+                        id="my-alert-instance_attributes-another"
-+                        name="another"
-+                        value="val"
-+                    />
-+                    <nvpair
-+                        id="my-alert-instance_attributes-my-attr"
-+                        name="my-attr"
-+                        value="its_val"
-+                    />
-+                </instance_attributes>
-+                <meta_attributes id="my-alert-meta_attributes">
-+                    <nvpair
-+                        id="my-alert-meta_attributes-meta1"
-+                        name="meta1"
-+                        value="val2"
-+                    />
-+                </meta_attributes>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+
-+    def test_update_instance_attribute(self):
-+        self.mock_env._push_cib_xml(
-+            """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="my-alert" path="/my/path" description="my description">
-+                <instance_attributes id="my-alert-instance_attributes">
-+                    <nvpair
-+                        id="my-alert-instance_attributes-instance"
-+                        name="instance"
-+                        value="value"
-+                    />
-+                </instance_attributes>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """
-+        )
-+        cmd_alert.update_alert(
-+            self.mock_env,
-+            "my-alert",
-+            None,
-+            {"instance": "new_val"},
-+            {},
-+            None
-+        )
-+        assert_xml_equal(
-+            """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="my-alert" path="/my/path" description="my description">
-+                <instance_attributes id="my-alert-instance_attributes">
-+                    <nvpair
-+                        id="my-alert-instance_attributes-instance"
-+                        name="instance"
-+                        value="new_val"
-+                    />
-+                </instance_attributes>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+
-+    def test_alert_doesnt_exist(self):
-+        self.mock_env._push_cib_xml(
-+            """
-+            <cib validate-with="pacemaker-2.5">
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="path"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """
-+        )
-+        assert_raise_library_error(
-+            lambda: cmd_alert.update_alert(
-+                self.mock_env, "unknown", "test", {}, {}, None
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "unknown"}
-+            )
-+        )
-+
-+
-+class RemoveAlertTest(TestCase):
-+    def setUp(self):
-+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        cib = """
-+            <cib validate-with="pacemaker-2.5">
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="path"/>
-+                        <alert id="alert-1" path="/path"/>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+        """
-+        self.mock_env = LibraryEnvironment(
-+            self.mock_log, self.mock_rep, cib_data=cib
-+        )
-+
-+    def test_success(self):
-+        cmd_alert.remove_alert(self.mock_env, "alert")
-+        assert_xml_equal(
-+            """
-+                <cib validate-with="pacemaker-2.5">
-+                    <configuration>
-+                        <alerts>
-+                            <alert id="alert-1" path="/path"/>
-+                        </alerts>
-+                    </configuration>
-+                </cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+
-+    def test_not_existing_alert(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.remove_alert(self.mock_env, "unknown"),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "unknown"}
-+            )
-+        )
-+
-+
-+class AddRecipientTest(TestCase):
-+    def setUp(self):
-+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        cib = """
-+            <cib validate-with="pacemaker-2.5">
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="path">
-+                            <recipient id="alert-recipient" value="value1"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+        """
-+        self.mock_env = LibraryEnvironment(
-+            self.mock_log, self.mock_rep, cib_data=cib
-+        )
-+
-+    def test_alert_not_found(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.add_recipient(
-+                self.mock_env, "unknown", "recipient", {}, {}
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "unknown"}
-+            )
-+        )
-+
-+    def test_value_not_defined(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.add_recipient(
-+                self.mock_env, "unknown", "", {}, {}
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.REQUIRED_OPTION_IS_MISSING,
-+                {"option_name": "value"}
-+            )
-+        )
-+
-+    def test_recipient_already_exists(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.add_recipient(
-+                self.mock_env, "alert", "value1", {}, {}
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+                {
-+                    "recipient": "value1",
-+                    "alert": "alert"
-+                }
-+            )
-+        )
-+
-+    def test_success(self):
-+        cmd_alert.add_recipient(
-+            self.mock_env,
-+            "alert",
-+            "value",
-+            {"attr1": "val1"},
-+            {
-+                "attr2": "val2",
-+                "attr1": "val1"
-+            }
-+        )
-+        assert_xml_equal(
-+            """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="alert" path="path">
-+                <recipient id="alert-recipient" value="value1"/>
-+                <recipient id="alert-recipient-1" value="value">
-+                    <meta_attributes
-+                        id="alert-recipient-1-meta_attributes"
-+                    >
-+                        <nvpair
-+                            id="alert-recipient-1-meta_attributes-attr1"
-+                            name="attr1"
-+                            value="val1"
-+                        />
-+                        <nvpair
-+                            id="alert-recipient-1-meta_attributes-attr2"
-+                            name="attr2"
-+                            value="val2"
-+                        />
-+                    </meta_attributes>
-+                    <instance_attributes
-+                        id="alert-recipient-1-instance_attributes"
-+                    >
-+                        <nvpair
-+                            id="alert-recipient-1-instance_attributes-attr1"
-+                            name="attr1"
-+                            value="val1"
-+                        />
-+                    </instance_attributes>
-+                </recipient>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+
-+
-+class UpdateRecipientTest(TestCase):
-+    def setUp(self):
-+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        cib = """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="alert" path="path">
-+                <recipient id="alert-recipient" value="value1"/>
-+                <recipient id="alert-recipient-1" value="value" description="d">
-+                    <meta_attributes
-+                        id="alert-recipient-1-meta_attributes"
-+                    >
-+                        <nvpair
-+                            id="alert-recipient-1-meta_attributes-attr1"
-+                            name="attr1"
-+                            value="val1"
-+                        />
-+                        <nvpair
-+                            id="alert-recipient-1-meta_attributes-attr2"
-+                            name="attr2"
-+                            value="val2"
-+                        />
-+                    </meta_attributes>
-+                    <instance_attributes
-+                        id="alert-recipient-1-instance_attributes"
-+                    >
-+                        <nvpair
-+                            id="alert-recipient-1-instance_attributes-attr1"
-+                            name="attr1"
-+                            value="val1"
-+                        />
-+                    </instance_attributes>
-+                </recipient>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+        """
-+        self.mock_env = LibraryEnvironment(
-+            self.mock_log, self.mock_rep, cib_data=cib
-+        )
-+
-+    def test_alert_not_found(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.update_recipient(
-+                self.mock_env, "unknown", "recipient", {}, {}
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "unknown"}
-+            )
-+        )
-+
-+    def test_recipient_not_found(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.update_recipient(
-+                self.mock_env, "alert", "recipient", {}, {}
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                {
-+                    "recipient": "recipient",
-+                    "alert": "alert"
-+                }
-+            )
-+        )
-+
-+    def test_update_all(self):
-+        cmd_alert.update_recipient(
-+            self.mock_env,
-+            "alert",
-+            "value",
-+            {"attr1": "value"},
-+            {
-+                "attr1": "",
-+                "attr3": "new_val"
-+            },
-+            "desc"
-+        )
-+        assert_xml_equal(
-+            """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="alert" path="path">
-+                <recipient id="alert-recipient" value="value1"/>
-+                <recipient
-+                    id="alert-recipient-1"
-+                    value="value"
-+                    description="desc"
-+                >
-+                    <meta_attributes
-+                        id="alert-recipient-1-meta_attributes"
-+                    >
-+                        <nvpair
-+                            id="alert-recipient-1-meta_attributes-attr2"
-+                            name="attr2"
-+                            value="val2"
-+                        />
-+                        <nvpair
-+                            id="alert-recipient-1-meta_attributes-attr3"
-+                            name="attr3"
-+                            value="new_val"
-+                        />
-+                    </meta_attributes>
-+                    <instance_attributes
-+                        id="alert-recipient-1-instance_attributes"
-+                    >
-+                        <nvpair
-+                            id="alert-recipient-1-instance_attributes-attr1"
-+                            name="attr1"
-+                            value="value"
-+                        />
-+                    </instance_attributes>
-+                </recipient>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+
-+
-+class RemoveRecipientTest(TestCase):
-+    def setUp(self):
-+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        cib = """
-+            <cib validate-with="pacemaker-2.5">
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="path">
-+                            <recipient id="alert-recipient" value="value1"/>
-+                            <recipient id="alert-recipient-1" value="value"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+        """
-+        self.mock_env = LibraryEnvironment(
-+            self.mock_log, self.mock_rep, cib_data=cib
-+        )
-+
-+    def test_alert_not_found(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.remove_recipient(
-+                self.mock_env, "unknown", "recipient"
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_NOT_FOUND,
-+                {"alert": "unknown"}
-+            )
-+        )
-+
-+    def test_recipient_not_found(self):
-+        assert_raise_library_error(
-+            lambda: cmd_alert.remove_recipient(
-+                self.mock_env, "alert", "recipient"
-+            ),
-+            (
-+                Severities.ERROR,
-+                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                {
-+                    "recipient": "recipient",
-+                    "alert": "alert"
-+                }
-+            )
-+        )
-+
-+    def test_success(self):
-+        cmd_alert.remove_recipient(self.mock_env, "alert", "value1")
-+        assert_xml_equal(
-+            """
-+            <cib validate-with="pacemaker-2.5">
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="path">
-+                            <recipient id="alert-recipient-1" value="value"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+
-+
-+@mock.patch("pcs.lib.cib.alert.get_all_alerts")
-+class GetAllAlertsTest(TestCase):
-+    def setUp(self):
-+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
-+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
-+        self.mock_rep = MockLibraryReportProcessor()
-+        self.mock_env = LibraryEnvironment(
-+            self.mock_log, self.mock_rep, cib_data='<cib/>'
-+        )
-+
-+    def test_success(self, mock_alerts):
-+        mock_alerts.return_value = [{"id": "alert"}]
-+        self.assertEqual(
-+            [{"id": "alert"}],
-+            cmd_alert.get_all_alerts(self.mock_env)
-+        )
-+        self.assertEqual(1, mock_alerts.call_count)
-diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
-index a22a014..751001b 100644
---- a/pcs/lib/commands/test/test_ticket.py
-+++ b/pcs/lib/commands/test/test_ticket.py
-@@ -44,7 +44,7 @@ class CreateTest(TestCase):
-         })
- 
-         assert_xml_equal(
--            env.get_cib_xml(),
-+            env._get_cib_xml(),
-             str(cib.append_to_first_tag_name(
-                 'constraints', """
-                     <rsc_ticket
-diff --git a/pcs/lib/env.py b/pcs/lib/env.py
-index 99e3397..1151891 100644
---- a/pcs/lib/env.py
-+++ b/pcs/lib/env.py
-@@ -27,6 +27,7 @@ from pcs.lib.pacemaker import (
-     get_cib_xml,
-     replace_cib_configuration_xml,
- )
-+from pcs.lib.cib.tools import ensure_cib_version
- 
- 
- class LibraryEnvironment(object):
-@@ -54,6 +55,7 @@ class LibraryEnvironment(object):
-         # related code currently - it's in pcsd
-         self._auth_tokens_getter = auth_tokens_getter
-         self._auth_tokens = None
-+        self._cib_upgraded = False
- 
-     @property
-     def logger(self):
-@@ -77,27 +79,45 @@ class LibraryEnvironment(object):
-             self._is_cman_cluster = is_cman_cluster(self.cmd_runner())
-         return self._is_cman_cluster
- 
--    def get_cib_xml(self):
-+    @property
-+    def cib_upgraded(self):
-+        return self._cib_upgraded
-+
-+    def _get_cib_xml(self):
-         if self.is_cib_live:
-             return get_cib_xml(self.cmd_runner())
-         else:
-             return self._cib_data
- 
--    def get_cib(self):
--        return get_cib(self.get_cib_xml())
-+    def get_cib(self, minimal_version=None):
-+        cib = get_cib(self._get_cib_xml())
-+        if minimal_version is not None:
-+            upgraded_cib = ensure_cib_version(
-+                self.cmd_runner(), cib, minimal_version
-+            )
-+            if upgraded_cib is not None:
-+                cib = upgraded_cib
-+                self._cib_upgraded = True
-+        return cib
- 
--    def push_cib_xml(self, cib_data):
-+    def _push_cib_xml(self, cib_data):
-         if self.is_cib_live:
--            replace_cib_configuration_xml(self.cmd_runner(), cib_data)
-+            replace_cib_configuration_xml(
-+                self.cmd_runner(), cib_data, self._cib_upgraded
-+            )
-+            if self._cib_upgraded:
-+                self._cib_upgraded = False
-+                self.report_processor.process(reports.cib_upgrade_successful())
-         else:
-             self._cib_data = cib_data
- 
-+
-     def push_cib(self, cib):
-         #etree returns bytes: b'xml'
-         #python 3 removed .encode() from bytes
-         #run(...) calls subprocess.Popen.communicate which calls encode...
-         #so here is bytes to str conversion
--        self.push_cib_xml(etree.tostring(cib).decode())
-+        self._push_cib_xml(etree.tostring(cib).decode())
- 
-     @property
-     def is_cib_live(self):
-diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker.py
-index 14745c5..fd6f97b 100644
---- a/pcs/lib/pacemaker.py
-+++ b/pcs/lib/pacemaker.py
-@@ -55,24 +55,21 @@ def get_cib(xml):
-     except (etree.XMLSyntaxError, etree.DocumentInvalid):
-         raise LibraryError(reports.cib_load_error_invalid_format())
- 
--def replace_cib_configuration_xml(runner, xml):
--    output, retval = runner.run(
--        [
--            __exec("cibadmin"),
--            "--replace", "--scope", "configuration", "--verbose", "--xml-pipe"
--        ],
--        stdin_string=xml
--    )
-+def replace_cib_configuration_xml(runner, xml, cib_upgraded=False):
-+    cmd = [__exec("cibadmin"), "--replace",  "--verbose", "--xml-pipe"]
-+    if not cib_upgraded:
-+        cmd += ["--scope", "configuration"]
-+    output, retval = runner.run(cmd, stdin_string=xml)
-     if retval != 0:
-         raise LibraryError(reports.cib_push_error(retval, output))
- 
--def replace_cib_configuration(runner, tree):
-+def replace_cib_configuration(runner, tree, cib_upgraded=False):
-     #etree returns bytes: b'xml'
-     #python 3 removed .encode() from bytes
-     #run(...) calls subprocess.Popen.communicate which calls encode...
-     #so here is bytes to str conversion
-     xml = etree.tostring(tree).decode()
--    return replace_cib_configuration_xml(runner, xml)
-+    return replace_cib_configuration_xml(runner, xml, cib_upgraded)
- 
- def get_local_node_status(runner):
-     try:
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index 4f4f580..490b4ff 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -1436,3 +1436,94 @@ def cluster_restart_required_to_apply_changes():
-         report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES,
-         "Cluster restart is required in order to apply these changes."
-     )
-+
-+
-+def cib_alert_recipient_already_exists(alert_id, recipient_value):
-+    """
-+    Error that recipient already exists.
-+
-+    alert_id -- id of alert to which recipient belongs
-+    recipient_value -- value of recipient
-+    """
-+    return ReportItem.error(
-+        report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+        "Recipient '{recipient}' in alert '{alert}' already exists.",
-+        info={
-+            "recipient": recipient_value,
-+            "alert": alert_id
-+        }
-+    )
-+
-+
-+def cib_alert_recipient_not_found(alert_id, recipient_value):
-+    """
-+    Specified recipient not found.
-+
-+    alert_id -- id of alert to which recipient should belong
-+    recipient_value -- recipient value
-+    """
-+    return ReportItem.error(
-+        report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+        "Recipient '{recipient}' not found in alert '{alert}'.",
-+        info={
-+            "recipient": recipient_value,
-+            "alert": alert_id
-+        }
-+    )
-+
-+
-+def cib_alert_not_found(alert_id):
-+    """
-+    Alert with specified id doesn't exist.
-+
-+    alert_id -- id of alert
-+    """
-+    return ReportItem.error(
-+        report_codes.CIB_ALERT_NOT_FOUND,
-+        "Alert '{alert}' not found.",
-+        info={"alert": alert_id}
-+    )
-+
-+
-+def cib_upgrade_successful():
-+    """
-+    Upgrade of CIB schema was successful.
-+    """
-+    return ReportItem.info(
-+        report_codes.CIB_UPGRADE_SUCCESSFUL,
-+        "CIB has been upgraded to the latest schema version."
-+    )
-+
-+
-+def cib_upgrade_failed(reason):
-+    """
-+    Upgrade of CIB schema failed.
-+
-+    reason -- reason of failure
-+    """
-+    return ReportItem.error(
-+        report_codes.CIB_UPGRADE_FAILED,
-+        "Upgrading of CIB to the latest schema failed: {reason}",
-+        info={"reason": reason}
-+    )
-+
-+
-+def unable_to_upgrade_cib_to_required_version(
-+    current_version, required_version
-+):
-+    """
-+    Unable to upgrade CIB to minimal required schema version.
-+
-+    current_version -- current version of CIB schema
-+    required_version -- required version of CIB schema
-+    """
-+    return ReportItem.error(
-+        report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
-+        "Unable to upgrade CIB to required schema version {required_version} "
-+        "or higher. Current version is {current_version}. Newer version of "
-+        "pacemaker is needed.",
-+        info={
-+            "required_version": "{0}.{1}.{2}".format(*required_version),
-+            "current_version": "{0}.{1}.{2}".format(*current_version)
-+        }
-+    )
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 0e230b7..425b613 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -56,6 +56,9 @@ Manage pcs daemon.
- .TP
- node
- Manage cluster nodes.
-+.TP
-+alert
-+Manage pacemaker alerts.
- .SS "resource"
- .TP
- [show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR]
-@@ -635,6 +638,28 @@ Remove node from standby mode (the node specified will now be able to host resou
- .TP
- utilization [<node> [<name>=<value> ...]]
- Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram=
-+.SS "alert"
-+.TP
-+[config|show]
-+Show all configured alerts.
-+.TP
-+create path=<path> [id=<alert\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-+Create new alert with specified path. Id will be automatically generated if it is not specified.
-+.TP
-+update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-+Update existing alert with specified id.
-+.TP
-+remove <alert\-id>
-+Remove alert with specified id.
-+.TP
-+recipient add <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-+Add new recipient to specified alert.
-+.TP
-+recipient update <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-+Update existing recipient identified by alert and it's value.
-+.TP
-+recipient remove <alert\-id> <recipient\-value>
-+Remove specified recipient.
- .SH EXAMPLES
- .TP
- Show all resources
-diff --git a/pcs/test/resources/cib-empty-2.5.xml b/pcs/test/resources/cib-empty-2.5.xml
-new file mode 100644
-index 0000000..1b4fb0a
---- /dev/null
-+++ b/pcs/test/resources/cib-empty-2.5.xml
-@@ -0,0 +1,10 @@
-+<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
-+  <configuration>
-+    <crm_config/>
-+    <nodes>
-+    </nodes>
-+    <resources/>
-+    <constraints/>
-+  </configuration>
-+  <status/>
-+</cib>
-diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
-new file mode 100644
-index 0000000..905dc9f
---- /dev/null
-+++ b/pcs/test/test_alert.py
-@@ -0,0 +1,363 @@
-+
-+from __future__ import (
-+    absolute_import,
-+    division,
-+    print_function,
-+    unicode_literals,
-+)
-+
-+import shutil
-+import sys
-+
-+from pcs.test.tools.misc import (
-+    get_test_resource as rc,
-+    is_minimum_pacemaker_version,
-+)
-+from pcs.test.tools.assertions import AssertPcsMixin
-+from pcs.test.tools.pcs_runner import PcsRunner
-+
-+major, minor = sys.version_info[:2]
-+if major == 2 and minor == 6:
-+    import unittest2 as unittest
-+else:
-+    import unittest
-+
-+
-+old_cib = rc("cib-empty.xml")
-+empty_cib = rc("cib-empty-2.5.xml")
-+temp_cib = rc("temp-cib.xml")
-+
-+
-+ALERTS_SUPPORTED = is_minimum_pacemaker_version(1, 1, 15)
-+ALERTS_NOT_SUPPORTED_MSG = "Pacemaker version is too old (must be >= 1.1.15)" +\
-+    " to test alerts"
-+
-+
-+class PcsAlertTest(unittest.TestCase, AssertPcsMixin):
-+    def setUp(self):
-+        shutil.copy(empty_cib, temp_cib)
-+        self.pcs_runner = PcsRunner(temp_cib)
-+
-+
-+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
-+class AlertCibUpgradeTest(unittest.TestCase, AssertPcsMixin):
-+    def setUp(self):
-+        shutil.copy(old_cib, temp_cib)
-+        self.pcs_runner = PcsRunner(temp_cib)
-+
-+    def test_cib_upgrade(self):
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ No alerts defined
-+"""
-+        )
-+
-+        self.assert_pcs_success(
-+            "alert create path=test",
-+            "CIB has been upgraded to the latest schema version.\n"
-+        )
-+
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+"""
-+        )
-+
-+
-+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
-+class CreateAlertTest(PcsAlertTest):
-+    def test_create_multiple_without_id(self):
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ No alerts defined
-+"""
-+        )
-+
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert create path=test2")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+ Alert: alert-1 (path=test)
-+ Alert: alert-2 (path=test2)
-+"""
-+        )
-+
-+    def test_create_multiple_with_id(self):
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ No alerts defined
-+"""
-+        )
-+        self.assert_pcs_success("alert create id=alert1 path=test")
-+        self.assert_pcs_success(
-+            "alert create id=alert2 description=desc path=test"
-+        )
-+        self.assert_pcs_success(
-+            "alert create description=desc2 path=test2 id=alert3"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert1 (path=test)
-+ Alert: alert2 (path=test)
-+  Description: desc
-+ Alert: alert3 (path=test2)
-+  Description: desc2
-+"""
-+        )
-+
-+    def test_create_with_options(self):
-+        self.assert_pcs_success(
-+            "alert create id=alert1 description=desc path=test "
-+            "options opt1=val1 opt2=val2 meta m1=v1 m2=v2"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert1 (path=test)
-+  Description: desc
-+  Options: opt1=val1 opt2=val2
-+  Meta options: m1=v1 m2=v2
-+"""
-+        )
-+
-+    def test_already_exists(self):
-+        self.assert_pcs_success("alert create id=alert1 path=test")
-+        self.assert_pcs_fail(
-+            "alert create id=alert1 path=test",
-+            "Error: 'alert1' already exists\n"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert1 (path=test)
-+"""
-+        )
-+
-+    def test_path_is_required(self):
-+        self.assert_pcs_fail(
-+            "alert create id=alert1",
-+            "Error: required option 'path' is missing\n"
-+        )
-+
-+
-+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
-+class UpdateAlertTest(PcsAlertTest):
-+    def test_update_everything(self):
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ No alerts defined
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "alert create id=alert1 description=desc path=test "
-+            "options opt1=val1 opt2=val2 meta m1=v1 m2=v2"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert1 (path=test)
-+  Description: desc
-+  Options: opt1=val1 opt2=val2
-+  Meta options: m1=v1 m2=v2
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "alert update alert1 description=new_desc path=/new/path "
-+            "options opt1= opt2=test opt3=1 meta m1= m2=v m3=3"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert1 (path=/new/path)
-+  Description: new_desc
-+  Options: opt2=test opt3=1
-+  Meta options: m2=v m3=3
-+"""
-+        )
-+
-+    def test_not_existing_alert(self):
-+        self.assert_pcs_fail(
-+            "alert update alert1", "Error: Alert 'alert1' not found.\n"
-+        )
-+
-+
-+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
-+class RemoveAlertTest(PcsAlertTest):
-+    def test_not_existing_alert(self):
-+        self.assert_pcs_fail(
-+            "alert remove alert1", "Error: Alert 'alert1' not found.\n"
-+        )
-+
-+    def test_success(self):
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ No alerts defined
-+"""
-+        )
-+
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+"""
-+        )
-+        self.assert_pcs_success("alert remove alert")
-+
-+
-+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
-+class AddRecipientTest(PcsAlertTest):
-+    def test_success(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+"""
-+        )
-+        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: rec_value
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "alert recipient add alert rec_value2 description=description "
-+            "options o1=1 o2=2 meta m1=v1 m2=v2"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: rec_value
-+   Recipient: rec_value2
-+    Description: description
-+    Options: o1=1 o2=2
-+    Meta options: m1=v1 m2=v2
-+"""
-+        )
-+
-+    def test_no_alert(self):
-+        self.assert_pcs_fail(
-+            "alert recipient add alert rec_value",
-+            "Error: Alert 'alert' not found.\n"
-+        )
-+
-+    def test_already_exists(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_fail(
-+            "alert recipient add alert rec_value",
-+            "Error: Recipient 'rec_value' in alert 'alert' already exists.\n"
-+        )
-+
-+
-+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
-+class UpdateRecipientAlert(PcsAlertTest):
-+    def test_success(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success(
-+            "alert recipient add alert rec_value description=description "
-+            "options o1=1 o2=2 meta m1=v1 m2=v2"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: rec_value
-+    Description: description
-+    Options: o1=1 o2=2
-+    Meta options: m1=v1 m2=v2
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "alert recipient update alert rec_value description=desc "
-+            "options o1= o2=v2 o3=3 meta m1= m2=2 m3=3"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: rec_value
-+    Description: desc
-+    Options: o2=v2 o3=3
-+    Meta options: m2=2 m3=3
-+"""
-+        )
-+
-+    def test_no_alert(self):
-+        self.assert_pcs_fail(
-+            "alert recipient update alert rec_value description=desc",
-+            "Error: Alert 'alert' not found.\n"
-+        )
-+
-+    def test_no_recipient(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_fail(
-+            "alert recipient update alert rec_value description=desc",
-+            "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
-+        )
-+
-+
-+@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
-+class RemoveRecipientTest(PcsAlertTest):
-+    def test_success(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: rec_value
-+"""
-+        )
-+        self.assert_pcs_success("alert recipient remove alert rec_value")
-+
-+    def test_no_alert(self):
-+        self.assert_pcs_fail(
-+            "alert recipient remove alert rec_value",
-+            "Error: Alert 'alert' not found.\n"
-+        )
-+
-+    def test_no_recipient(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_fail(
-+            "alert recipient remove alert rec_value",
-+            "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
-+        )
-diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
-index 405a270..1149a3f 100644
---- a/pcs/test/test_lib_cib_tools.py
-+++ b/pcs/test/test_lib_cib_tools.py
-@@ -7,12 +7,18 @@ from __future__ import (
- 
- from unittest import TestCase
- 
--from pcs.test.tools.assertions import assert_raise_library_error
-+from lxml import etree
-+
-+from pcs.test.tools.assertions import (
-+    assert_raise_library_error,
-+    assert_xml_equal,
-+)
- from pcs.test.tools.misc import get_test_resource as rc
- from pcs.test.tools.pcs_mock import mock
- from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
- 
- from pcs.common import report_codes
-+from pcs.lib.external import CommandRunner
- from pcs.lib.errors import ReportItemSeverity as severities
- 
- from pcs.lib.cib import tools as lib
-@@ -145,3 +151,176 @@ class ValidateIdDoesNotExistsTest(TestCase):
-             ),
-         )
-         does_id_exists.assert_called_once_with("tree", "some-id")
-+
-+
-+class GetSubElementTest(TestCase):
-+    def setUp(self):
-+        self.root = etree.Element("root")
-+        self.sub = etree.SubElement(self.root, "sub_element")
-+
-+    def test_sub_element_exists(self):
-+        self.assertEqual(
-+            self.sub, lib.get_sub_element(self.root, "sub_element")
-+        )
-+
-+    def test_new_no_id(self):
-+        assert_xml_equal(
-+            '<new_element/>',
-+            etree.tostring(
-+                lib.get_sub_element(self.root, "new_element")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <root>
-+                <sub_element/>
-+                <new_element/>
-+            </root>
-+            """,
-+            etree.tostring(self.root).decode()
-+        )
-+
-+    def test_new_with_id(self):
-+        assert_xml_equal(
-+            '<new_element id="new_id"/>',
-+            etree.tostring(
-+                lib.get_sub_element(self.root, "new_element", "new_id")
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <root>
-+                <sub_element/>
-+                <new_element id="new_id"/>
-+            </root>
-+            """,
-+            etree.tostring(self.root).decode()
-+        )
-+
-+    def test_new_first(self):
-+        lib.get_sub_element(self.root, "new_element", "new_id", 0)
-+        assert_xml_equal(
-+            """
-+            <root>
-+                <new_element id="new_id"/>
-+                <sub_element/>
-+            </root>
-+            """,
-+            etree.tostring(self.root).decode()
-+        )
-+
-+    def test_new_last(self):
-+        lib.get_sub_element(self.root, "new_element", "new_id", None)
-+        assert_xml_equal(
-+            """
-+            <root>
-+                <sub_element/>
-+                <new_element id="new_id"/>
-+            </root>
-+            """,
-+            etree.tostring(self.root).decode()
-+        )
-+
-+
-+class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
-+    def test_missing_attribute(self):
-+        assert_raise_library_error(
-+            lambda: lib.get_pacemaker_version_by_which_cib_was_validated(
-+                etree.XML("<cib/>")
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
-+                {}
-+            )
-+        )
-+
-+    def test_invalid_version(self):
-+        assert_raise_library_error(
-+            lambda: lib.get_pacemaker_version_by_which_cib_was_validated(
-+                etree.XML('<cib validate-with="something-1.2.3"/>')
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
-+                {}
-+            )
-+        )
-+
-+    def test_no_revision(self):
-+        self.assertEqual(
-+            (1, 2, 0),
-+            lib.get_pacemaker_version_by_which_cib_was_validated(
-+                etree.XML('<cib validate-with="pacemaker-1.2"/>')
-+            )
-+        )
-+
-+    def test_with_revision(self):
-+        self.assertEqual(
-+            (1, 2, 3),
-+            lib.get_pacemaker_version_by_which_cib_was_validated(
-+                etree.XML('<cib validate-with="pacemaker-1.2.3"/>')
-+            )
-+        )
-+
-+
-+@mock.patch("pcs.lib.cib.tools.upgrade_cib")
-+class EnsureCibVersionTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        self.cib = etree.XML('<cib validate-with="pacemaker-2.3.4"/>')
-+
-+    def test_same_version(self, mock_upgrade_cib):
-+        self.assertTrue(
-+            lib.ensure_cib_version(
-+                self.mock_runner, self.cib, (2, 3, 4)
-+            ) is None
-+        )
-+        self.assertEqual(0, mock_upgrade_cib.run.call_count)
-+
-+    def test_higher_version(self, mock_upgrade_cib):
-+        self.assertTrue(
-+            lib.ensure_cib_version(
-+                self.mock_runner, self.cib, (2, 3, 3)
-+            ) is None
-+        )
-+        self.assertEqual(0, mock_upgrade_cib.call_count)
-+
-+    def test_upgraded_same_version(self, mock_upgrade_cib):
-+        upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.5"/>')
-+        mock_upgrade_cib.return_value = upgraded_cib
-+        self.assertEqual(
-+            upgraded_cib,
-+            lib.ensure_cib_version(
-+                self.mock_runner, self.cib, (2, 3, 5)
-+            )
-+        )
-+        mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
-+
-+    def test_upgraded_higher_version(self, mock_upgrade_cib):
-+        upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.6"/>')
-+        mock_upgrade_cib.return_value = upgraded_cib
-+        self.assertEqual(
-+            upgraded_cib,
-+            lib.ensure_cib_version(
-+                self.mock_runner, self.cib, (2, 3, 5)
-+            )
-+        )
-+        mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
-+
-+    def test_upgraded_lower_version(self, mock_upgrade_cib):
-+        mock_upgrade_cib.return_value = self.cib
-+        assert_raise_library_error(
-+            lambda: lib.ensure_cib_version(
-+                self.mock_runner, self.cib, (2, 3, 5)
-+            ),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
-+                {
-+                    "required_version": "2.3.5",
-+                    "current_version": "2.3.4"
-+                }
-+            )
-+        )
-+        mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
-diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py
-index fbaac09..95f7a00 100644
---- a/pcs/test/test_lib_env.py
-+++ b/pcs/test/test_lib_env.py
-@@ -7,8 +7,13 @@ from __future__ import (
- 
- from unittest import TestCase
- import logging
-+from lxml import etree
- 
--from pcs.test.tools.assertions import assert_raise_library_error
-+from pcs.test.tools.assertions import (
-+    assert_raise_library_error,
-+    assert_xml_equal,
-+    assert_report_item_list_equal,
-+)
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
- from pcs.test.tools.misc import get_test_resource as rc
- from pcs.test.tools.pcs_mock import mock
-@@ -82,13 +87,13 @@ class LibraryEnvironmentTest(TestCase):
- 
-         self.assertFalse(env.is_cib_live)
- 
--        self.assertEqual(cib_data, env.get_cib_xml())
-+        self.assertEqual(cib_data, env._get_cib_xml())
-         self.assertEqual(0, mock_get_cib.call_count)
- 
--        env.push_cib_xml(new_cib_data)
-+        env._push_cib_xml(new_cib_data)
-         self.assertEqual(0, mock_push_cib.call_count)
- 
--        self.assertEqual(new_cib_data, env.get_cib_xml())
-+        self.assertEqual(new_cib_data, env._get_cib_xml())
-         self.assertEqual(0, mock_get_cib.call_count)
- 
-     @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
-@@ -101,12 +106,135 @@ class LibraryEnvironmentTest(TestCase):
- 
-         self.assertTrue(env.is_cib_live)
- 
--        self.assertEqual(cib_data, env.get_cib_xml())
-+        self.assertEqual(cib_data, env._get_cib_xml())
-         self.assertEqual(1, mock_get_cib.call_count)
- 
--        env.push_cib_xml(new_cib_data)
-+        env._push_cib_xml(new_cib_data)
-         self.assertEqual(1, mock_push_cib.call_count)
- 
-+    @mock.patch("pcs.lib.env.ensure_cib_version")
-+    @mock.patch("pcs.lib.env.get_cib_xml")
-+    def test_get_cib_no_version_live(
-+            self, mock_get_cib_xml, mock_ensure_cib_version
-+    ):
-+        mock_get_cib_xml.return_value = '<cib/>'
-+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode())
-+        self.assertEqual(1, mock_get_cib_xml.call_count)
-+        self.assertEqual(0, mock_ensure_cib_version.call_count)
-+        self.assertFalse(env.cib_upgraded)
-+
-+    @mock.patch("pcs.lib.env.ensure_cib_version")
-+    @mock.patch("pcs.lib.env.get_cib_xml")
-+    def test_get_cib_upgrade_live(
-+        self, mock_get_cib_xml, mock_ensure_cib_version
-+    ):
-+        mock_get_cib_xml.return_value = '<cib/>'
-+        mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
-+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        assert_xml_equal(
-+            '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
-+        )
-+        self.assertEqual(1, mock_get_cib_xml.call_count)
-+        self.assertEqual(1, mock_ensure_cib_version.call_count)
-+        self.assertTrue(env.cib_upgraded)
-+
-+    @mock.patch("pcs.lib.env.ensure_cib_version")
-+    @mock.patch("pcs.lib.env.get_cib_xml")
-+    def test_get_cib_no_upgrade_live(
-+            self, mock_get_cib_xml, mock_ensure_cib_version
-+    ):
-+        mock_get_cib_xml.return_value = '<cib/>'
-+        mock_ensure_cib_version.return_value = None
-+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        assert_xml_equal(
-+            '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
-+        )
-+        self.assertEqual(1, mock_get_cib_xml.call_count)
-+        self.assertEqual(1, mock_ensure_cib_version.call_count)
-+        self.assertFalse(env.cib_upgraded)
-+
-+    @mock.patch("pcs.lib.env.ensure_cib_version")
-+    @mock.patch("pcs.lib.env.get_cib_xml")
-+    def test_get_cib_no_version_file(
-+            self, mock_get_cib_xml, mock_ensure_cib_version
-+    ):
-+        env = LibraryEnvironment(
-+            self.mock_logger, self.mock_reporter, cib_data='<cib/>'
-+        )
-+        assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode())
-+        self.assertEqual(0, mock_get_cib_xml.call_count)
-+        self.assertEqual(0, mock_ensure_cib_version.call_count)
-+        self.assertFalse(env.cib_upgraded)
-+
-+    @mock.patch("pcs.lib.env.ensure_cib_version")
-+    @mock.patch("pcs.lib.env.get_cib_xml")
-+    def test_get_cib_upgrade_file(
-+            self, mock_get_cib_xml, mock_ensure_cib_version
-+    ):
-+        mock_ensure_cib_version.return_value = etree.XML('<new_cib/>')
-+        env = LibraryEnvironment(
-+            self.mock_logger, self.mock_reporter, cib_data='<cib/>'
-+        )
-+        assert_xml_equal(
-+            '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
-+        )
-+        self.assertEqual(0, mock_get_cib_xml.call_count)
-+        self.assertEqual(1, mock_ensure_cib_version.call_count)
-+        self.assertTrue(env.cib_upgraded)
-+
-+    @mock.patch("pcs.lib.env.ensure_cib_version")
-+    @mock.patch("pcs.lib.env.get_cib_xml")
-+    def test_get_cib_no_upgrade_file(
-+            self, mock_get_cib_xml, mock_ensure_cib_version
-+    ):
-+        mock_ensure_cib_version.return_value = None
-+        env = LibraryEnvironment(
-+            self.mock_logger, self.mock_reporter, cib_data='<cib/>'
-+        )
-+        assert_xml_equal(
-+            '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode()
-+        )
-+        self.assertEqual(0, mock_get_cib_xml.call_count)
-+        self.assertEqual(1, mock_ensure_cib_version.call_count)
-+        self.assertFalse(env.cib_upgraded)
-+
-+    @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "cmd_runner",
-+        lambda self: "mock cmd runner"
-+    )
-+    def test_push_cib_not_upgraded_live(self, mock_replace_cib):
-+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        env.push_cib(etree.XML('<cib/>'))
-+        mock_replace_cib.assert_called_once_with(
-+            "mock cmd runner", '<cib/>', False
-+        )
-+        self.assertEqual([], env.report_processor.report_item_list)
-+
-+    @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
-+    @mock.patch.object(
-+        LibraryEnvironment,
-+        "cmd_runner",
-+        lambda self: "mock cmd runner"
-+    )
-+    def test_push_cib_upgraded_live(self, mock_replace_cib):
-+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        env._cib_upgraded = True
-+        env.push_cib(etree.XML('<cib/>'))
-+        mock_replace_cib.assert_called_once_with(
-+            "mock cmd runner", '<cib/>', True
-+        )
-+        assert_report_item_list_equal(
-+            env.report_processor.report_item_list,
-+            [(
-+                severity.INFO,
-+                report_codes.CIB_UPGRADE_SUCCESSFUL,
-+                {}
-+            )]
-+        )
-+
-     @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
-     @mock.patch("pcs.lib.env.reload_corosync_config")
-     @mock.patch("pcs.lib.env.distribute_corosync_conf")
-diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py
-index 85d2034..0edee5c 100644
---- a/pcs/test/test_lib_pacemaker.py
-+++ b/pcs/test/test_lib_pacemaker.py
-@@ -206,12 +206,28 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
- 
-         mock_runner.run.assert_called_once_with(
-             [
--                self.path("cibadmin"), "--replace", "--scope", "configuration",
--                "--verbose", "--xml-pipe"
-+                self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe",
-+                "--scope", "configuration"
-             ],
-             stdin_string=xml
-         )
- 
-+    def test_cib_upgraded(self):
-+        xml = "<xml/>"
-+        expected_output = "expected output"
-+        expected_retval = 0
-+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        mock_runner.run.return_value = (expected_output, expected_retval)
-+
-+        lib.replace_cib_configuration(
-+            mock_runner, XmlManipulation.from_str(xml).tree, True
-+        )
-+
-+        mock_runner.run.assert_called_once_with(
-+            [self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe"],
-+            stdin_string=xml
-+        )
-+
-     def test_error(self):
-         xml = "<xml/>"
-         expected_error = "expected error"
-@@ -237,8 +253,8 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
- 
-         mock_runner.run.assert_called_once_with(
-             [
--                self.path("cibadmin"), "--replace", "--scope", "configuration",
--                "--verbose", "--xml-pipe"
-+                self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe",
-+                "--scope", "configuration"
-             ],
-             stdin_string=xml
-         )
-diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
-index e8c0813..2fa5088 100644
---- a/pcs/test/test_resource.py
-+++ b/pcs/test/test_resource.py
-@@ -1541,6 +1541,9 @@ Ordering Constraints:
- Colocation Constraints:
- Ticket Constraints:
- 
-+Alerts:
-+ No alerts defined
-+
- Resources Defaults:
-  No defaults set
- Operations Defaults:
-@@ -1704,6 +1707,9 @@ Ordering Constraints:
- Colocation Constraints:
- Ticket Constraints:
- 
-+Alerts:
-+ No alerts defined
-+
- Resources Defaults:
-  No defaults set
- Operations Defaults:
-diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
-index 479c8e9..a6ee2f5 100644
---- a/pcs/test/test_stonith.py
-+++ b/pcs/test/test_stonith.py
-@@ -149,6 +149,9 @@ Ordering Constraints:
- Colocation Constraints:
- Ticket Constraints:
- 
-+Alerts:
-+ No alerts defined
-+
- Resources Defaults:
-  No defaults set
- Operations Defaults:
-diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py
-index 305fe32..78a0787 100644
---- a/pcs/test/tools/color_text_runner.py
-+++ b/pcs/test/tools/color_text_runner.py
-@@ -64,6 +64,16 @@ class ColorTextTestResult(TextTestResult):
-             self.stream.write(apply(["lightred", "bold"], 'F'))
-             self.stream.flush()
- 
-+    def addSkip(self, test, reason):
-+        super(TextTestResult, self).addSkip(test, reason)
-+        if self.showAll:
-+            self.stream.writeln(
-+                apply(["blue", "bold"], "skipped {0!r}".format(reason))
-+            )
-+        elif self.dots:
-+            self.stream.write(apply(["blue", "bold"], 's'))
-+            self.stream.flush()
-+
-     def getDescription(self, test):
-         doc_first_line = test.shortDescription()
-         if self.descriptions and doc_first_line:
-diff --git a/pcs/usage.py b/pcs/usage.py
-index c4c417a..8ae6839 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -24,6 +24,7 @@ def full_usage():
-     out += strip_extras(status([],False))
-     out += strip_extras(config([],False))
-     out += strip_extras(pcsd([],False))
-+    out += strip_extras(alert([], False))
-     print(out.strip())
-     print("Examples:\n" + examples.replace(" \ ",""))
- 
-@@ -115,6 +116,7 @@ def generate_completion_tree_from_usage():
-     tree["config"] = generate_tree(config([],False))
-     tree["pcsd"] = generate_tree(pcsd([],False))
-     tree["node"] = generate_tree(node([], False))
-+    tree["alert"] = generate_tree(alert([], False))
-     return tree
- 
- def generate_tree(usage_txt):
-@@ -169,6 +171,7 @@ Commands:
-     config      View and manage cluster configuration.
-     pcsd        Manage pcs daemon.
-     node        Manage cluster nodes.
-+    alert       Set pacemaker alerts.
- """
- # Advanced usage to possibly add later
- #  --corosync_conf=<corosync file> Specify alternative corosync.conf file
-@@ -1347,9 +1350,49 @@ Commands:
-     else:
-         return output
- 
-+
-+def alert(args=[], pout=True):
-+    output = """
-+Usage: pcs alert <command>
-+Set pacemaker alerts.
-+
-+Commands:
-+    [config|show]
-+        Show all configured alerts.
-+
-+    create path=<path> [id=<alert-id>] [description=<description>]
-+            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-+        Create new alert with specified path. Id will be automatically
-+        generated if it is not specified.
-+
-+    update <alert-id> [path=<path>] [description=<description>]
-+            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-+        Update existing alert with specified id.
-+
-+    remove <alert-id>
-+        Remove alert with specified id.
-+
-+    recipient add <alert-id> <recipient-value> [description=<description>]
-+            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-+        Add new recipient to specified alert.
-+
-+    recipient update <alert-id> <recipient-value> [description=<description>]
-+            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-+        Update existing recipient identified by alert and it's value.
-+
-+    recipient remove <alert-id> <recipient-value>
-+        Remove specified recipient.
-+"""
-+    if pout:
-+        print(sub_usage(args, output))
-+    else:
-+        return output
-+
-+
- def show(main_usage_name, rest_usage_names):
-     usage_map = {
-         "acl": acl,
-+        "alert": alert,
-         "cluster": cluster,
-         "config": config,
-         "constraint": constraint,
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 11bd4cf..f9cdb1c 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1592,7 +1592,7 @@ def is_etree(var):
-     )
- 
- # Replace only configuration section of cib with dom passed
--def replace_cib_configuration(dom):
-+def replace_cib_configuration(dom, cib_upgraded=False):
-     if is_etree(dom):
-         #etree returns string in bytes: b'xml'
-         #python 3 removed .encode() from byte strings
-@@ -1603,7 +1603,12 @@ def replace_cib_configuration(dom):
-         new_dom = dom.toxml()
-     else:
-         new_dom = dom
--    output, retval = run(["cibadmin", "--replace", "-o", "configuration", "-V", "--xml-pipe"],False,new_dom)
-+    cmd = ["cibadmin", "--replace", "-V", "--xml-pipe"]
-+    if cib_upgraded:
-+        print("CIB has been upgraded to the latest schema version.")
-+    else:
-+        cmd += ["-o", "configuration"]
-+    output, retval = run(cmd, False, new_dom)
-     if retval != 0:
-         err("Unable to update cib\n"+output)
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch b/SOURCES/bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch
deleted file mode 100644
index 2154f0e..0000000
--- a/SOURCES/bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch
+++ /dev/null
@@ -1,1638 +0,0 @@
-From 8eef21a7bbfdcba709515529a40fadc1f5386b70 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Fri, 8 Jul 2016 16:43:16 +0200
-Subject: [PATCH 1/2] lib: use recipient id as identifier instead of its value
-
----
- pcs/common/report_codes.py          |   3 +-
- pcs/lib/cib/alert.py                | 129 +++++++----
- pcs/lib/cib/test/test_alert.py      | 449 +++++++++++++++++++++++++++++++-----
- pcs/lib/commands/alert.py           |  45 +++-
- pcs/lib/commands/test/test_alert.py | 111 ++++++---
- pcs/lib/reports.py                  |  29 ++-
- 6 files changed, 597 insertions(+), 169 deletions(-)
-
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index 2b39938..53f2ccb 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -7,6 +7,7 @@ from __future__ import (
- 
- # force cathegories
- FORCE_ACTIVE_RRP = "ACTIVE_RRP"
-+FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE"
- FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE"
- FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
- FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
-@@ -22,7 +23,7 @@ AGENT_NOT_FOUND = "AGENT_NOT_FOUND"
- BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
- CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
- CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
--CIB_ALERT_RECIPIENT_NOT_FOUND = "CIB_ALERT_RECIPIENT_NOT_FOUND"
-+CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID"
- CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
- CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
- CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
-diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py
-index 6b72996..b5fe88c 100644
---- a/pcs/lib/cib/alert.py
-+++ b/pcs/lib/cib/alert.py
-@@ -7,14 +7,16 @@ from __future__ import (
- 
- from lxml import etree
- 
-+from pcs.common import report_codes
- from pcs.lib import reports
--from pcs.lib.errors import LibraryError
-+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
- from pcs.lib.cib.nvpair import update_nvset, get_nvset
- from pcs.lib.cib.tools import (
-     check_new_id_applicable,
-     get_sub_element,
-     find_unique_id,
-     get_alerts,
-+    validate_id_does_not_exist,
- )
- 
- 
-@@ -61,7 +63,7 @@ def _update_optional_attribute(element, attribute, value):
- def get_alert_by_id(tree, alert_id):
-     """
-     Returns alert element with specified id.
--    Raises AlertNotFound if alert with specified id doesn't exist.
-+    Raises LibraryError if alert with specified id doesn't exist.
- 
-     tree -- cib etree node
-     alert_id -- id of alert
-@@ -72,25 +74,53 @@ def get_alert_by_id(tree, alert_id):
-     return alert
- 
- 
--def get_recipient(alert, recipient_value):
-+def get_recipient_by_id(tree, recipient_id):
-     """
-     Returns recipient element with value recipient_value which belong to
-     specified alert.
--    Raises RecipientNotFound if recipient doesn't exist.
-+    Raises LibraryError if recipient doesn't exist.
- 
--    alert -- parent element of required recipient
--    recipient_value -- value of recipient
-+    tree -- cib etree node
-+    recipient_id -- id of recipient
-     """
--    recipient = alert.find(
--        "./recipient[@value='{0}']".format(recipient_value)
-+    recipient = get_alerts(tree).find(
-+        "./alert/recipient[@id='{0}']".format(recipient_id)
-     )
-     if recipient is None:
--        raise LibraryError(reports.cib_alert_recipient_not_found(
--            alert.get("id"), recipient_value
--        ))
-+        raise LibraryError(reports.id_not_found(recipient_id, "Recipient"))
-     return recipient
- 
- 
-+def ensure_recipient_value_is_unique(
-+    reporter, alert, recipient_value, recipient_id="", allow_duplicity=False
-+):
-+    """
-+    Ensures that recipient_value is unique in alert.
-+
-+    reporter -- report processor
-+    alert -- alert
-+    recipient_value -- recipient value
-+    recipient_id -- recipient id of to which value belongs to
-+    allow_duplicity -- if True only warning will be shown if value already
-+        exists
-+    """
-+    recipient_list = alert.xpath(
-+        "./recipient[@value='{value}' and @id!='{id}']".format(
-+            value=recipient_value, id=recipient_id
-+        )
-+    )
-+    if recipient_list:
-+        reporter.process(reports.cib_alert_recipient_already_exists(
-+            alert.get("id", None),
-+            recipient_value,
-+            Severities.WARNING if allow_duplicity else Severities.ERROR,
-+            forceable=(
-+                None if allow_duplicity
-+                else report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
-+            )
-+        ))
-+
-+
- def create_alert(tree, alert_id, path, description=""):
-     """
-     Create new alert element. Returns newly created element.
-@@ -116,7 +146,7 @@ def create_alert(tree, alert_id, path, description=""):
- def update_alert(tree, alert_id, path, description=None):
-     """
-     Update existing alert. Return updated alert element.
--    Raises AlertNotFound if alert with specified id doesn't exist.
-+    Raises LibraryError if alert with specified id doesn't exist.
- 
-     tree -- cib etree node
-     alert_id -- id of alert to be updated
-@@ -134,7 +164,7 @@ def update_alert(tree, alert_id, path, description=None):
- def remove_alert(tree, alert_id):
-     """
-     Remove alert with specified id.
--    Raises AlertNotFound if alert with specified id doesn't exist.
-+    Raises LibraryError if alert with specified id doesn't exist.
- 
-     tree -- cib etree node
-     alert_id -- id of alert which should be removed
-@@ -144,36 +174,38 @@ def remove_alert(tree, alert_id):
- 
- 
- def add_recipient(
-+    reporter,
-     tree,
-     alert_id,
-     recipient_value,
--    description=""
-+    recipient_id=None,
-+    description="",
-+    allow_same_value=False
- ):
-     """
-     Add recipient to alert with specified id. Returns added recipient element.
--    Raises AlertNotFound if alert with specified id doesn't exist.
-+    Raises LibraryError if alert with specified recipient_id doesn't exist.
-     Raises LibraryError if recipient already exists.
- 
-+    reporter -- report processor
-     tree -- cib etree node
-     alert_id -- id of alert which should be parent of new recipient
-     recipient_value -- value of recipient
-+    recipient_id -- id of new recipient, if None it will be generated
-     description -- description of recipient
-+    allow_same_value -- if True unique recipient value is not required
-     """
--    alert = get_alert_by_id(tree, alert_id)
-+    if recipient_id is None:
-+        recipient_id = find_unique_id(tree, "{0}-recipient".format(alert_id))
-+    else:
-+        validate_id_does_not_exist(tree, recipient_id)
- 
--    recipient = alert.find(
--        "./recipient[@value='{0}']".format(recipient_value)
-+    alert = get_alert_by_id(tree, alert_id)
-+    ensure_recipient_value_is_unique(
-+        reporter, alert, recipient_value, allow_duplicity=allow_same_value
-     )
--    if recipient is not None:
--        raise LibraryError(reports.cib_alert_recipient_already_exists(
--            alert_id, recipient_value
--        ))
--
-     recipient = etree.SubElement(
--        alert,
--        "recipient",
--        id=find_unique_id(tree, "{0}-recipient".format(alert_id)),
--        value=recipient_value
-+        alert, "recipient", id=recipient_id, value=recipient_value
-     )
- 
-     if description:
-@@ -182,38 +214,49 @@ def add_recipient(
-     return recipient
- 
- 
--def update_recipient(tree, alert_id, recipient_value, description):
-+def update_recipient(
-+    reporter,
-+    tree,
-+    recipient_id,
-+    recipient_value=None,
-+    description=None,
-+    allow_same_value=False
-+):
-     """
-     Update specified recipient. Returns updated recipient element.
--    Raises AlertNotFound if alert with specified id doesn't exist.
--    Raises RecipientNotFound if recipient doesn't exist.
-+    Raises LibraryError if recipient doesn't exist.
- 
-+    reporter -- report processor
-     tree -- cib etree node
--    alert_id -- id of alert, parent element of recipient
--    recipient_value -- recipient value
-+    recipient_id -- id of recipient to be updated
-+    recipient_value -- recipient value, stay unchanged if None
-     description -- description, if empty it will be removed, stay unchanged
-         if None
-+    allow_same_value -- if True unique recipient value is not required
-     """
--    recipient = get_recipient(
--        get_alert_by_id(tree, alert_id), recipient_value
--    )
-+    recipient = get_recipient_by_id(tree, recipient_id)
-+    if recipient_value is not None:
-+        ensure_recipient_value_is_unique(
-+            reporter,
-+            recipient.getparent(),
-+            recipient_value,
-+            recipient_id=recipient_id,
-+            allow_duplicity=allow_same_value
-+        )
-+        recipient.set("value", recipient_value)
-     _update_optional_attribute(recipient, "description", description)
-     return recipient
- 
- 
--def remove_recipient(tree, alert_id, recipient_value):
-+def remove_recipient(tree, recipient_id):
-     """
-     Remove specified recipient.
--    Raises AlertNotFound if alert with specified id doesn't exist.
--    Raises RecipientNotFound if recipient doesn't exist.
-+    Raises LibraryError if recipient doesn't exist.
- 
-     tree -- cib etree node
--    alert_id -- id of alert, parent element of recipient
--    recipient_value -- recipient value
-+    recipient_id -- id of recipient to be removed
-     """
--    recipient = get_recipient(
--        get_alert_by_id(tree, alert_id), recipient_value
--    )
-+    recipient = get_recipient_by_id(tree, recipient_id)
-     recipient.getparent().remove(recipient)
- 
- 
-diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
-index c387aaf..50eaef6 100644
---- a/pcs/lib/cib/test/test_alert.py
-+++ b/pcs/lib/cib/test/test_alert.py
-@@ -15,8 +15,10 @@ from pcs.lib.errors import ReportItemSeverity as severities
- from pcs.test.tools.assertions import(
-     assert_raise_library_error,
-     assert_xml_equal,
-+    assert_report_item_list_equal,
- )
- from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
- 
- 
- @mock.patch("pcs.lib.cib.alert.update_nvset")
-@@ -129,54 +131,146 @@ class GetAlertByIdTest(TestCase):
-         )
- 
- 
--class GetRecipientTest(TestCase):
-+class GetRecipientByIdTest(TestCase):
-     def setUp(self):
-         self.xml = etree.XML(
-             """
--                <alert id="alert-1">
--                    <recipient id="rec-1" value="value1"/>
--                    <recipient id="rec-2" value="value2"/>
--                    <not_recipient value="value3"/>
--                    <recipients>
--                        <recipient id="rec-4" value="value4"/>
--                    </recipients>
--                </alert>
-+                <cib>
-+                    <configuration>
-+                        <alerts>
-+                            <alert id="alert-1">
-+                                <recipient id="rec-1" value="value1"/>
-+                                <not_recipient id="rec-3" value="value3"/>
-+                                <recipients>
-+                                    <recipient id="rec-4" value="value4"/>
-+                                </recipients>
-+                            </alert>
-+                            <recipient id="rec-2" value="value2"/>
-+                        </alerts>
-+                        <alert id="alert-2"/>
-+                    </configuration>
-+                </cib>
-             """
-         )
- 
-     def test_exist(self):
-         assert_xml_equal(
--            '<recipient id="rec-2" value="value2"/>',
--            etree.tostring(alert.get_recipient(self.xml, "value2")).decode()
-+            '<recipient id="rec-1" value="value1"/>',
-+            etree.tostring(
-+                alert.get_recipient_by_id(self.xml, "rec-1")
-+            ).decode()
-         )
- 
-     def test_different_place(self):
-         assert_raise_library_error(
--            lambda: alert.get_recipient(self.xml, "value4"),
-+            lambda: alert.get_recipient_by_id(self.xml, "rec-4"),
-             (
-                 severities.ERROR,
--                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                report_codes.ID_NOT_FOUND,
-                 {
--                    "alert": "alert-1",
--                    "recipient": "value4"
-+                    "id": "rec-4",
-+                    "id_description": "Recipient"
-+                }
-+            )
-+        )
-+
-+    def test_not_in_alert(self):
-+        assert_raise_library_error(
-+            lambda: alert.get_recipient_by_id(self.xml, "rec-2"),
-+            (
-+                severities.ERROR,
-+                report_codes.ID_NOT_FOUND,
-+                {
-+                    "id": "rec-2",
-+                    "id_description": "Recipient"
-                 }
-             )
-         )
- 
-     def test_not_recipient(self):
-         assert_raise_library_error(
--            lambda: alert.get_recipient(self.xml, "value3"),
-+            lambda: alert.get_recipient_by_id(self.xml, "rec-3"),
-             (
-                 severities.ERROR,
--                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                report_codes.ID_NOT_FOUND,
-                 {
--                    "alert": "alert-1",
--                    "recipient": "value3"
-+                    "id": "rec-3",
-+                    "id_description": "Recipient"
-                 }
-             )
-         )
- 
- 
-+class EnsureRecipientValueIsUniqueTest(TestCase):
-+    def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-+        self.alert = etree.Element("alert", id="alert-1")
-+        self.recipient = etree.SubElement(
-+            self.alert, "recipient", id="rec-1", value="value1"
-+        )
-+
-+    def test_is_unique_no_duplicity_allowed(self):
-+        alert.ensure_recipient_value_is_unique(
-+            self.mock_reporter, self.alert, "value2"
-+        )
-+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
-+
-+    def test_same_recipient_no_duplicity_allowed(self):
-+        alert.ensure_recipient_value_is_unique(
-+            self.mock_reporter, self.alert, "value1", recipient_id="rec-1"
-+        )
-+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
-+
-+    def test_same_recipient_duplicity_allowed(self):
-+        alert.ensure_recipient_value_is_unique(
-+            self.mock_reporter, self.alert, "value1", recipient_id="rec-1",
-+            allow_duplicity=True
-+        )
-+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
-+
-+    def test_not_unique_no_duplicity_allowed(self):
-+        report_item = (
-+            severities.ERROR,
-+            report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+            {
-+                "alert": "alert-1",
-+                "recipient": "value1"
-+            },
-+            report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
-+        )
-+        assert_raise_library_error(
-+            lambda: alert.ensure_recipient_value_is_unique(
-+                self.mock_reporter, self.alert, "value1"
-+            ),
-+            report_item
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list, [report_item]
-+        )
-+
-+    def test_is_unique_duplicity_allowed(self):
-+        alert.ensure_recipient_value_is_unique(
-+            self.mock_reporter, self.alert, "value2", allow_duplicity=True
-+        )
-+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
-+
-+    def test_not_unique_duplicity_allowed(self):
-+        alert.ensure_recipient_value_is_unique(
-+            self.mock_reporter, self.alert, "value1", allow_duplicity=True
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                severities.WARNING,
-+                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+                {
-+                    "alert": "alert-1",
-+                    "recipient": "value1"
-+                }
-+            )]
-+        )
-+
-+
- class CreateAlertTest(TestCase):
-     def setUp(self):
-         self.tree = etree.XML(
-@@ -462,6 +556,7 @@ class RemoveAlertTest(TestCase):
- 
- class AddRecipientTest(TestCase):
-     def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-         self.tree = etree.XML(
-             """
-             <cib>
-@@ -476,11 +571,40 @@ class AddRecipientTest(TestCase):
-             """
-         )
- 
--    def test_success(self):
-+    def test_with_id(self):
-+        assert_xml_equal(
-+            '<recipient id="my-recipient" value="value1"/>',
-+            etree.tostring(
-+                alert.add_recipient(
-+                    self.mock_reporter, self.tree, "alert", "value1",
-+                    "my-recipient"
-+                )
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient id="my-recipient" value="value1"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_without_id(self):
-         assert_xml_equal(
-             '<recipient id="alert-recipient-1" value="value1"/>',
-             etree.tostring(
--                alert.add_recipient(self.tree, "alert", "value1")
-+                alert.add_recipient(
-+                    self.mock_reporter, self.tree, "alert", "value1"
-+                )
-             ).decode()
-         )
-         assert_xml_equal(
-@@ -498,23 +622,85 @@ class AddRecipientTest(TestCase):
-             """,
-             etree.tostring(self.tree).decode()
-         )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
- 
--    def test_recipient_exist(self):
-+    def test_id_exists(self):
-         assert_raise_library_error(
--            lambda: alert.add_recipient(self.tree, "alert", "test_val"),
-+            lambda: alert.add_recipient(
-+                self.mock_reporter, self.tree, "alert", "value1",
-+                "alert-recipient"
-+            ),
-             (
-                 severities.ERROR,
-+                report_codes.ID_ALREADY_EXISTS,
-+                {"id": "alert-recipient"}
-+            )
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_duplicity_of_value_not_allowed(self):
-+        report_item = (
-+            severities.ERROR,
-+            report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+            {
-+                "alert": "alert",
-+                "recipient": "test_val"
-+            },
-+            report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
-+        )
-+        assert_raise_library_error(
-+            lambda: alert.add_recipient(
-+                self.mock_reporter, self.tree, "alert", "test_val"
-+            ),
-+            report_item
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [report_item]
-+        )
-+
-+    def test_duplicity_of_value_allowed(self):
-+        assert_xml_equal(
-+            '<recipient id="alert-recipient-1" value="test_val"/>',
-+            etree.tostring(
-+                alert.add_recipient(
-+                    self.mock_reporter, self.tree, "alert", "test_val",
-+                    allow_same_value=True
-+                )
-+            ).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient id="alert-recipient-1" value="test_val"/>
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                severities.WARNING,
-                 report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-                 {
--                    "recipient": "test_val",
--                    "alert": "alert"
-+                    "alert": "alert",
-+                    "recipient": "test_val"
-                 }
--            )
-+            )]
-         )
- 
-     def test_alert_not_exist(self):
-         assert_raise_library_error(
--            lambda: alert.add_recipient(self.tree, "alert1", "test_val"),
-+            lambda: alert.add_recipient(
-+                self.mock_reporter, self.tree, "alert1", "test_val"
-+            ),
-             (
-                 severities.ERROR,
-                 report_codes.CIB_ALERT_NOT_FOUND,
-@@ -532,7 +718,8 @@ class AddRecipientTest(TestCase):
-             />
-             """,
-             etree.tostring(alert.add_recipient(
--                self.tree, "alert", "value1", "desc"
-+                self.mock_reporter, self.tree, "alert", "value1",
-+                description="desc"
-             )).decode()
-         )
-         assert_xml_equal(
-@@ -554,10 +741,12 @@ class AddRecipientTest(TestCase):
-             """,
-             etree.tostring(self.tree).decode()
-         )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
- 
- 
- class UpdateRecipientTest(TestCase):
-     def setUp(self):
-+        self.mock_reporter = MockLibraryReportProcessor()
-         self.tree = etree.XML(
-             """
-             <cib>
-@@ -577,6 +766,157 @@ class UpdateRecipientTest(TestCase):
-             """
-         )
- 
-+    def test_update_value(self):
-+        assert_xml_equal(
-+            """
-+            <recipient id="alert-recipient" value="new_val"/>
-+            """,
-+            etree.tostring(alert.update_recipient(
-+                self.mock_reporter, self.tree, "alert-recipient",
-+                recipient_value="new_val"
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="new_val"/>
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="desc"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_update_same_value_no_duplicity_allowed(self):
-+        assert_xml_equal(
-+            '<recipient id="alert-recipient" value="test_val"/>',
-+            etree.tostring(alert.update_recipient(
-+                self.mock_reporter, self.tree, "alert-recipient",
-+                recipient_value="test_val"
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="desc"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_update_same_value_duplicity_allowed(self):
-+        assert_xml_equal(
-+            '<recipient id="alert-recipient" value="test_val"/>',
-+            etree.tostring(alert.update_recipient(
-+                self.mock_reporter, self.tree, "alert-recipient",
-+                recipient_value="test_val", allow_same_value=True
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="test_val"/>
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="desc"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
-+
-+    def test_duplicity_of_value_not_allowed(self):
-+        report_item = (
-+            severities.ERROR,
-+            report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+            {
-+                "alert": "alert",
-+                "recipient": "value1"
-+            },
-+            report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
-+        )
-+        assert_raise_library_error(
-+            lambda: alert.update_recipient(
-+                self.mock_reporter, self.tree, "alert-recipient", "value1"
-+            ),
-+            report_item
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [report_item]
-+        )
-+
-+    def test_duplicity_of_value_allowed(self):
-+        assert_xml_equal(
-+            """
-+            <recipient id="alert-recipient" value="value1"/>
-+            """,
-+            etree.tostring(alert.update_recipient(
-+                self.mock_reporter, self.tree, "alert-recipient",
-+                recipient_value="value1", allow_same_value=True
-+            )).decode()
-+        )
-+        assert_xml_equal(
-+            """
-+            <cib>
-+                <configuration>
-+                    <alerts>
-+                        <alert id="alert" path="/path">
-+                            <recipient id="alert-recipient" value="value1"/>
-+                            <recipient
-+                                id="alert-recipient-1"
-+                                value="value1"
-+                                description="desc"
-+                            />
-+                        </alert>
-+                    </alerts>
-+                </configuration>
-+            </cib>
-+            """,
-+            etree.tostring(self.tree).decode()
-+        )
-+        assert_report_item_list_equal(
-+            self.mock_reporter.report_item_list,
-+            [(
-+                severities.WARNING,
-+                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-+                {
-+                    "alert": "alert",
-+                    "recipient": "value1"
-+                }
-+            )]
-+        )
-+
-     def test_add_description(self):
-         assert_xml_equal(
-             """
-@@ -585,7 +925,8 @@ class UpdateRecipientTest(TestCase):
-             />
-             """,
-             etree.tostring(alert.update_recipient(
--                self.tree, "alert", "test_val", "description"
-+                self.mock_reporter, self.tree, "alert-recipient",
-+                description="description"
-             )).decode()
-         )
-         assert_xml_equal(
-@@ -611,6 +952,7 @@ class UpdateRecipientTest(TestCase):
-             """,
-             etree.tostring(self.tree).decode()
-         )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
- 
-     def test_update_description(self):
-         assert_xml_equal(
-@@ -620,7 +962,8 @@ class UpdateRecipientTest(TestCase):
-             />
-             """,
-             etree.tostring(alert.update_recipient(
--                self.tree, "alert", "value1", "description"
-+                self.mock_reporter, self.tree, "alert-recipient-1",
-+                description="description"
-             )).decode()
-         )
-         assert_xml_equal(
-@@ -642,6 +985,7 @@ class UpdateRecipientTest(TestCase):
-             """,
-             etree.tostring(self.tree).decode()
-         )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
- 
-     def test_remove_description(self):
-         assert_xml_equal(
-@@ -649,7 +993,10 @@ class UpdateRecipientTest(TestCase):
-                 <recipient id="alert-recipient-1" value="value1"/>
-             """,
-             etree.tostring(
--               alert.update_recipient(self.tree, "alert", "value1", "")
-+               alert.update_recipient(
-+                   self.mock_reporter, self.tree, "alert-recipient-1",
-+                   description=""
-+               )
-             ).decode()
-         )
-         assert_xml_equal(
-@@ -667,26 +1014,18 @@ class UpdateRecipientTest(TestCase):
-             """,
-             etree.tostring(self.tree).decode()
-         )
--
--    def test_alert_not_exists(self):
--        assert_raise_library_error(
--            lambda: alert.update_recipient(self.tree, "alert1", "test_val", ""),
--            (
--                severities.ERROR,
--                report_codes.CIB_ALERT_NOT_FOUND,
--                {"alert": "alert1"}
--            )
--        )
-+        self.assertEqual([], self.mock_reporter.report_item_list)
- 
-     def test_recipient_not_exists(self):
-         assert_raise_library_error(
--            lambda: alert.update_recipient(self.tree, "alert", "unknown", ""),
-+            lambda: alert.update_recipient(
-+                self.mock_reporter, self.tree, "recipient"),
-             (
-                 severities.ERROR,
--                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                report_codes.ID_NOT_FOUND,
-                 {
--                    "alert": "alert",
--                    "recipient": "unknown"
-+                    "id": "recipient",
-+                    "id_description": "Recipient"
-                 }
-             )
-         )
-@@ -710,7 +1049,7 @@ class RemoveRecipientTest(TestCase):
-         )
- 
-     def test_success(self):
--        alert.remove_recipient(self.tree, "alert", "val")
-+        alert.remove_recipient(self.tree, "alert-recipient-2")
-         assert_xml_equal(
-             """
-             <cib>
-@@ -726,25 +1065,15 @@ class RemoveRecipientTest(TestCase):
-             etree.tostring(self.tree).decode()
-         )
- 
--    def test_alert_not_exists(self):
--        assert_raise_library_error(
--            lambda: alert.remove_recipient(self.tree, "alert1", "test_val"),
--            (
--                severities.ERROR,
--                report_codes.CIB_ALERT_NOT_FOUND,
--                {"alert": "alert1"}
--            )
--        )
--
-     def test_recipient_not_exists(self):
-         assert_raise_library_error(
--            lambda: alert.remove_recipient(self.tree, "alert", "unknown"),
-+            lambda: alert.remove_recipient(self.tree, "recipient"),
-             (
-                 severities.ERROR,
--                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                report_codes.ID_NOT_FOUND,
-                 {
--                    "alert": "alert",
--                    "recipient": "unknown"
-+                    "id": "recipient",
-+                    "id_description": "Recipient"
-                 }
-             )
-         )
-diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
-index 7371fbc..432d9d5 100644
---- a/pcs/lib/commands/alert.py
-+++ b/pcs/lib/commands/alert.py
-@@ -90,7 +90,9 @@ def add_recipient(
-     recipient_value,
-     instance_attribute_dict,
-     meta_attribute_dict,
--    description=None
-+    recipient_id=None,
-+    description=None,
-+    allow_same_value=False
- ):
-     """
-     Add new recipient to alert witch id alert_id.
-@@ -100,7 +102,9 @@ def add_recipient(
-     recipient_value -- value of new recipient
-     instance_attribute_dict -- dictionary of instance attributes to update
-     meta_attribute_dict -- dictionary of meta attributes to update
-+    recipient_id -- id of new recipient, if None it will be generated
-     description -- recipient description
-+    allow_same_value -- if True unique recipient value is not required
-     """
-     if not recipient_value:
-         raise LibraryError(
-@@ -109,7 +113,13 @@ def add_recipient(
- 
-     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-     recipient = alert.add_recipient(
--        cib, alert_id, recipient_value, description
-+        lib_env.report_processor,
-+        cib,
-+        alert_id,
-+        recipient_value,
-+        recipient_id=recipient_id,
-+        description=description,
-+        allow_same_value=allow_same_value
-     )
-     alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
-     alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
-@@ -119,26 +129,38 @@ def add_recipient(
- 
- def update_recipient(
-     lib_env,
--    alert_id,
--    recipient_value,
-+    recipient_id,
-     instance_attribute_dict,
-     meta_attribute_dict,
--    description=None
-+    recipient_value=None,
-+    description=None,
-+    allow_same_value=False
- ):
-     """
-     Update existing recipient.
- 
-     lib_env -- LibraryEnvironment
--    alert_id -- id of alert to which recipient belong
--    recipient_value -- recipient to be updated
-+    recipient_id -- id of recipient to be updated
-     instance_attribute_dict -- dictionary of instance attributes to update
-     meta_attribute_dict -- dictionary of meta attributes to update
-+    recipient_value -- new recipient value, if None old value will stay
-+        unchanged
-     description -- new description, if empty string, old description will be
-         deleted, if None old value will stay unchanged
-+    allow_same_value -- if True unique recipient value is not required
-     """
-+    if not recipient_value and recipient_value is not None:
-+        raise LibraryError(
-+            reports.cib_alert_recipient_invalid_value(recipient_value)
-+        )
-     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-     recipient = alert.update_recipient(
--        cib, alert_id, recipient_value, description
-+        lib_env.report_processor,
-+        cib,
-+        recipient_id,
-+        recipient_value=recipient_value,
-+        description=description,
-+        allow_same_value=allow_same_value
-     )
-     alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
-     alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
-@@ -146,16 +168,15 @@ def update_recipient(
-     lib_env.push_cib(cib)
- 
- 
--def remove_recipient(lib_env, alert_id, recipient_value):
-+def remove_recipient(lib_env, recipient_id):
-     """
-     Remove existing recipient.
- 
-     lib_env -- LibraryEnvironment
--    alert_id -- id of alert to which recipient belong
--    recipient_value -- recipient to be removed
-+    recipient_id -- if of recipient to be removed
-     """
-     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
--    alert.remove_recipient(cib, alert_id, recipient_value)
-+    alert.remove_recipient(cib, recipient_id)
-     lib_env.push_cib(cib)
- 
- 
-diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
-index 34813df..bced45e 100644
---- a/pcs/lib/commands/test/test_alert.py
-+++ b/pcs/lib/commands/test/test_alert.py
-@@ -361,19 +361,17 @@ class AddRecipientTest(TestCase):
-     def test_recipient_already_exists(self):
-         assert_raise_library_error(
-             lambda: cmd_alert.add_recipient(
--                self.mock_env, "alert", "value1", {}, {}
-+                self.mock_env, "alert", "value1", {}, {},
-+                recipient_id="alert-recipient"
-             ),
-             (
-                 Severities.ERROR,
--                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
--                {
--                    "recipient": "value1",
--                    "alert": "alert"
--                }
-+                report_codes.ID_ALREADY_EXISTS,
-+                {"id": "alert-recipient"}
-             )
-         )
- 
--    def test_success(self):
-+    def test_without_id(self):
-         cmd_alert.add_recipient(
-             self.mock_env,
-             "alert",
-@@ -424,6 +422,58 @@ class AddRecipientTest(TestCase):
-             self.mock_env._get_cib_xml()
-         )
- 
-+    def test_with_id(self):
-+        cmd_alert.add_recipient(
-+            self.mock_env,
-+            "alert",
-+            "value",
-+            {"attr1": "val1"},
-+            {
-+                "attr2": "val2",
-+                "attr1": "val1"
-+            },
-+            recipient_id="my-recipient"
-+        )
-+        assert_xml_equal(
-+            """
-+<cib validate-with="pacemaker-2.5">
-+    <configuration>
-+        <alerts>
-+            <alert id="alert" path="path">
-+                <recipient id="alert-recipient" value="value1"/>
-+                <recipient id="my-recipient" value="value">
-+                    <meta_attributes
-+                        id="my-recipient-meta_attributes"
-+                    >
-+                        <nvpair
-+                            id="my-recipient-meta_attributes-attr1"
-+                            name="attr1"
-+                            value="val1"
-+                        />
-+                        <nvpair
-+                            id="my-recipient-meta_attributes-attr2"
-+                            name="attr2"
-+                            value="val2"
-+                        />
-+                    </meta_attributes>
-+                    <instance_attributes
-+                        id="my-recipient-instance_attributes"
-+                    >
-+                        <nvpair
-+                            id="my-recipient-instance_attributes-attr1"
-+                            name="attr1"
-+                            value="val1"
-+                        />
-+                    </instance_attributes>
-+                </recipient>
-+            </alert>
-+        </alerts>
-+    </configuration>
-+</cib>
-+            """,
-+            self.mock_env._get_cib_xml()
-+        )
-+
- 
- class UpdateRecipientTest(TestCase):
-     def setUp(self):
-@@ -470,29 +520,29 @@ class UpdateRecipientTest(TestCase):
-             self.mock_log, self.mock_rep, cib_data=cib
-         )
- 
--    def test_alert_not_found(self):
-+    def test_empty_value(self):
-         assert_raise_library_error(
-             lambda: cmd_alert.update_recipient(
--                self.mock_env, "unknown", "recipient", {}, {}
-+                self.mock_env, "alert-recipient-1", {}, {}, recipient_value=""
-             ),
-             (
-                 Severities.ERROR,
--                report_codes.CIB_ALERT_NOT_FOUND,
--                {"alert": "unknown"}
-+                report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
-+                {"recipient": ""}
-             )
-         )
- 
-     def test_recipient_not_found(self):
-         assert_raise_library_error(
-             lambda: cmd_alert.update_recipient(
--                self.mock_env, "alert", "recipient", {}, {}
-+                self.mock_env, "recipient", {}, {}
-             ),
-             (
-                 Severities.ERROR,
--                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-+                report_codes.ID_NOT_FOUND,
-                 {
--                    "recipient": "recipient",
--                    "alert": "alert"
-+                    "id": "recipient",
-+                    "id_description": "Recipient"
-                 }
-             )
-         )
-@@ -500,14 +550,14 @@ class UpdateRecipientTest(TestCase):
-     def test_update_all(self):
-         cmd_alert.update_recipient(
-             self.mock_env,
--            "alert",
--            "value",
-+            "alert-recipient-1",
-             {"attr1": "value"},
-             {
-                 "attr1": "",
-                 "attr3": "new_val"
-             },
--            "desc"
-+            recipient_value="new_val",
-+            description="desc"
-         )
-         assert_xml_equal(
-             """
-@@ -518,7 +568,7 @@ class UpdateRecipientTest(TestCase):
-                 <recipient id="alert-recipient" value="value1"/>
-                 <recipient
-                     id="alert-recipient-1"
--                    value="value"
-+                    value="new_val"
-                     description="desc"
-                 >
-                     <meta_attributes
-@@ -575,35 +625,20 @@ class RemoveRecipientTest(TestCase):
-             self.mock_log, self.mock_rep, cib_data=cib
-         )
- 
--    def test_alert_not_found(self):
--        assert_raise_library_error(
--            lambda: cmd_alert.remove_recipient(
--                self.mock_env, "unknown", "recipient"
--            ),
--            (
--                Severities.ERROR,
--                report_codes.CIB_ALERT_NOT_FOUND,
--                {"alert": "unknown"}
--            )
--        )
--
-     def test_recipient_not_found(self):
-         assert_raise_library_error(
-             lambda: cmd_alert.remove_recipient(
--                self.mock_env, "alert", "recipient"
-+                self.mock_env, "recipient"
-             ),
-             (
-                 Severities.ERROR,
--                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
--                {
--                    "recipient": "recipient",
--                    "alert": "alert"
--                }
-+                report_codes.ID_NOT_FOUND,
-+                {"id": "recipient"}
-             )
-         )
- 
-     def test_success(self):
--        cmd_alert.remove_recipient(self.mock_env, "alert", "value1")
-+        cmd_alert.remove_recipient(self.mock_env, "alert-recipient")
-         assert_xml_equal(
-             """
-             <cib validate-with="pacemaker-2.5">
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index 9ececf9..fc2670b 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -1654,40 +1654,39 @@ def cluster_restart_required_to_apply_changes():
-     )
- 
- 
--def cib_alert_recipient_already_exists(alert_id, recipient_value):
-+def cib_alert_recipient_already_exists(
-+    alert_id, recipient_value, severity=ReportItemSeverity.ERROR, forceable=None
-+):
-     """
--    Error that recipient already exists.
-+    Recipient with specified value already exists in alert with id 'alert_id'
- 
-     alert_id -- id of alert to which recipient belongs
-     recipient_value -- value of recipient
-     """
--    return ReportItem.error(
-+    return ReportItem(
-         report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
--        "Recipient '{recipient}' in alert '{alert}' already exists.",
-+        severity,
-+        "Recipient '{recipient}' in alert '{alert}' already exists",
-         info={
-             "recipient": recipient_value,
-             "alert": alert_id
--        }
-+        },
-+        forceable=forceable
-     )
- 
- 
--def cib_alert_recipient_not_found(alert_id, recipient_value):
-+def cib_alert_recipient_invalid_value(recipient_value):
-     """
--    Specified recipient not found.
-+    Invalid recipient value.
- 
--    alert_id -- id of alert to which recipient should belong
-     recipient_value -- recipient value
-     """
-     return ReportItem.error(
--        report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
--        "Recipient '{recipient}' not found in alert '{alert}'.",
--        info={
--            "recipient": recipient_value,
--            "alert": alert_id
--        }
-+        report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
-+        "Recipient value '{recipient}' is not valid.",
-+        info={"recipient": recipient_value}
-     )
- 
--
- def cib_alert_not_found(alert_id):
-     """
-     Alert with specified id doesn't exist.
--- 
-1.8.3.1
-
-
-From b8155a2bfa79bb71429953eb756e393c18926e4c Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Sat, 9 Jul 2016 13:54:00 +0200
-Subject: [PATCH 2/2] cli: use recipient id as identifier instead of its value
-
----
- pcs/alert.py           |  32 +++++-----
- pcs/pcs.8              |   8 +--
- pcs/test/test_alert.py | 165 ++++++++++++++++++++++++++++++++++++++++---------
- pcs/usage.py           |  14 +++--
- 4 files changed, 164 insertions(+), 55 deletions(-)
-
-diff --git a/pcs/alert.py b/pcs/alert.py
-index d3a6e28..4786f57 100644
---- a/pcs/alert.py
-+++ b/pcs/alert.py
-@@ -139,42 +139,44 @@ def recipient_add(lib, argv, modifiers):
- 
-     sections = parse_cmd_sections(argv[2:], ["options", "meta"])
-     main_args = prepare_options(sections["main"])
--    ensure_only_allowed_options(main_args, ["description"])
-+    ensure_only_allowed_options(main_args, ["description", "id"])
- 
-     lib.alert.add_recipient(
-         alert_id,
-         recipient_value,
-         prepare_options(sections["options"]),
-         prepare_options(sections["meta"]),
--        main_args.get("description", None)
-+        recipient_id=main_args.get("id", None),
-+        description=main_args.get("description", None),
-+        allow_same_value=modifiers["force"]
-     )
- 
- 
- def recipient_update(lib, argv, modifiers):
--    if len(argv) < 2:
-+    if len(argv) < 1:
-         raise CmdLineInputError()
- 
--    alert_id = argv[0]
--    recipient_value = argv[1]
-+    recipient_id = argv[0]
- 
--    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
-+    sections = parse_cmd_sections(argv[1:], ["options", "meta"])
-     main_args = prepare_options(sections["main"])
--    ensure_only_allowed_options(main_args, ["description"])
-+    ensure_only_allowed_options(main_args, ["description", "value"])
- 
-     lib.alert.update_recipient(
--        alert_id,
--        recipient_value,
-+        recipient_id,
-         prepare_options(sections["options"]),
-         prepare_options(sections["meta"]),
--        main_args.get("description", None)
-+        recipient_value=main_args.get("value", None),
-+        description=main_args.get("description", None),
-+        allow_same_value=modifiers["force"]
-     )
- 
- 
- def recipient_remove(lib, argv, modifiers):
--    if len(argv) != 2:
-+    if len(argv) != 1:
-         raise CmdLineInputError()
- 
--    lib.alert.remove_recipient(argv[0], argv[1])
-+    lib.alert.remove_recipient(argv[0])
- 
- 
- def _nvset_to_str(nvset_obj):
-@@ -219,9 +221,9 @@ def _alert_to_str(alert):
- 
- 
- def _recipient_to_str(recipient):
--    return ["Recipient: {value}".format(value=recipient["value"])] + indent(
--        __description_attributes_to_str(recipient), 1
--    )
-+    return ["Recipient: {id} (value={value})".format(
-+        value=recipient["value"], id=recipient["id"]
-+    )] + indent(__description_attributes_to_str(recipient), 1)
- 
- 
- def print_alert_config(lib, argv, modifiers):
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 4426444..223ef1b 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -666,13 +666,13 @@ Update existing alert with specified id.
- remove <alert\-id>
- Remove alert with specified id.
- .TP
--recipient add <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-+recipient add <alert\-id> <recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
- Add new recipient to specified alert.
- .TP
--recipient update <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
--Update existing recipient identified by alert and it's value.
-+recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-+Update existing recipient identified by it's id.
- .TP
--recipient remove <alert\-id> <recipient\-value>
-+recipient remove <recipient\-id>
- Remove specified recipient.
- .SH EXAMPLES
- .TP
-diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
-index 905dc9f..bb61600 100644
---- a/pcs/test/test_alert.py
-+++ b/pcs/test/test_alert.py
-@@ -246,12 +246,12 @@ Alerts:
- Alerts:
-  Alert: alert (path=test)
-   Recipients:
--   Recipient: rec_value
-+   Recipient: alert-recipient (value=rec_value)
- """
-         )
-         self.assert_pcs_success(
--            "alert recipient add alert rec_value2 description=description "
--            "options o1=1 o2=2 meta m1=v1 m2=v2"
-+            "alert recipient add alert rec_value2 id=my-recipient "
-+            "description=description options o1=1 o2=2 meta m1=v1 m2=v2"
-         )
-         self.assert_pcs_success(
-             "alert config",
-@@ -259,26 +259,56 @@ Alerts:
- Alerts:
-  Alert: alert (path=test)
-   Recipients:
--   Recipient: rec_value
--   Recipient: rec_value2
-+   Recipient: alert-recipient (value=rec_value)
-+   Recipient: my-recipient (value=rec_value2)
-     Description: description
-     Options: o1=1 o2=2
-     Meta options: m1=v1 m2=v2
- """
-         )
- 
--    def test_no_alert(self):
-+    def test_already_exists(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-         self.assert_pcs_fail(
--            "alert recipient add alert rec_value",
--            "Error: Alert 'alert' not found.\n"
-+            "alert recipient add alert value id=rec",
-+            "Error: 'rec' already exists\n"
-+        )
-+        self.assert_pcs_fail(
-+            "alert recipient add alert value id=alert",
-+            "Error: 'alert' already exists\n"
-         )
- 
--    def test_already_exists(self):
-+    def test_same_value(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-         self.assert_pcs_fail(
-             "alert recipient add alert rec_value",
--            "Error: Recipient 'rec_value' in alert 'alert' already exists.\n"
-+            "Error: Recipient 'rec_value' in alert 'alert' already exists, "
-+            "use --force to override\n"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: rec (value=rec_value)
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "alert recipient add alert rec_value --force",
-+            "Warning: Recipient 'rec_value' in alert 'alert' already exists\n"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: rec (value=rec_value)
-+   Recipient: alert-recipient (value=rec_value)
-+"""
-         )
- 
- 
-@@ -296,14 +326,14 @@ class UpdateRecipientAlert(PcsAlertTest):
- Alerts:
-  Alert: alert (path=test)
-   Recipients:
--   Recipient: rec_value
-+   Recipient: alert-recipient (value=rec_value)
-     Description: description
-     Options: o1=1 o2=2
-     Meta options: m1=v1 m2=v2
- """
-         )
-         self.assert_pcs_success(
--            "alert recipient update alert rec_value description=desc "
-+            "alert recipient update alert-recipient value=new description=desc "
-             "options o1= o2=v2 o3=3 meta m1= m2=2 m3=3"
-         )
-         self.assert_pcs_success(
-@@ -312,24 +342,99 @@ Alerts:
- Alerts:
-  Alert: alert (path=test)
-   Recipients:
--   Recipient: rec_value
-+   Recipient: alert-recipient (value=new)
-+    Description: desc
-+    Options: o2=v2 o3=3
-+    Meta options: m2=2 m3=3
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "alert recipient update alert-recipient value=new"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: alert-recipient (value=new)
-     Description: desc
-     Options: o2=v2 o3=3
-     Meta options: m2=2 m3=3
- """
-         )
- 
--    def test_no_alert(self):
-+    def test_value_exists(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success("alert recipient add alert value")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: alert-recipient (value=rec_value)
-+   Recipient: alert-recipient-1 (value=value)
-+"""
-+        )
-         self.assert_pcs_fail(
--            "alert recipient update alert rec_value description=desc",
--            "Error: Alert 'alert' not found.\n"
-+            "alert recipient update alert-recipient value=value",
-+            "Error: Recipient 'value' in alert 'alert' already exists, "
-+            "use --force to override\n"
-+        )
-+        self.assert_pcs_success(
-+            "alert recipient update alert-recipient value=value --force",
-+            "Warning: Recipient 'value' in alert 'alert' already exists\n"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: alert-recipient (value=value)
-+   Recipient: alert-recipient-1 (value=value)
-+"""
-+        )
-+
-+    def test_value_same_as_previous(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: alert-recipient (value=rec_value)
-+"""
-+        )
-+        self.assert_pcs_success(
-+            "alert recipient update alert-recipient value=rec_value"
-+        )
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+  Recipients:
-+   Recipient: alert-recipient (value=rec_value)
-+"""
-         )
- 
-     def test_no_recipient(self):
-+        self.assert_pcs_fail(
-+            "alert recipient update rec description=desc",
-+            "Error: Recipient 'rec' does not exist\n"
-+        )
-+
-+    def test_empty_value(self):
-         self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-         self.assert_pcs_fail(
--            "alert recipient update alert rec_value description=desc",
--            "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
-+            "alert recipient update rec value=",
-+            "Error: Recipient value '' is not valid.\n"
-         )
- 
- 
-@@ -337,27 +442,27 @@ Alerts:
- class RemoveRecipientTest(PcsAlertTest):
-     def test_success(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-         self.assert_pcs_success(
-             "alert config",
-             """\
- Alerts:
-  Alert: alert (path=test)
-   Recipients:
--   Recipient: rec_value
-+   Recipient: rec (value=rec_value)
- """
-         )
--        self.assert_pcs_success("alert recipient remove alert rec_value")
--
--    def test_no_alert(self):
--        self.assert_pcs_fail(
--            "alert recipient remove alert rec_value",
--            "Error: Alert 'alert' not found.\n"
-+        self.assert_pcs_success("alert recipient remove rec")
-+        self.assert_pcs_success(
-+            "alert config",
-+            """\
-+Alerts:
-+ Alert: alert (path=test)
-+"""
-         )
- 
-     def test_no_recipient(self):
--        self.assert_pcs_success("alert create path=test")
-         self.assert_pcs_fail(
--            "alert recipient remove alert rec_value",
--            "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
-+            "alert recipient remove rec",
-+            "Error: Recipient 'rec' does not exist\n"
-         )
-diff --git a/pcs/usage.py b/pcs/usage.py
-index ee53a2f..77b496e 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1402,15 +1402,17 @@ Commands:
-     remove <alert-id>
-         Remove alert with specified id.
- 
--    recipient add <alert-id> <recipient-value> [description=<description>]
--            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-+    recipient add <alert-id> <recipient-value> [id=<recipient-id>]
-+            [description=<description>] [options [<option>=<value>]...]
-+            [meta [<meta-option>=<value>]...]
-         Add new recipient to specified alert.
- 
--    recipient update <alert-id> <recipient-value> [description=<description>]
--            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
--        Update existing recipient identified by alert and it's value.
-+    recipient update <recipient-id> [value=<recipient-value>]
-+            [description=<description>] [options [<option>=<value>]...]
-+            [meta [<meta-option>=<value>]...]
-+        Update existing recipient identified by it's id.
- 
--    recipient remove <alert-id> <recipient-value>
-+    recipient remove <recipient-id>
-         Remove specified recipient.
- """
-     if pout:
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1315371-03-improve-alerts-help.patch b/SOURCES/bz1315371-03-improve-alerts-help.patch
deleted file mode 100644
index a26e2b9..0000000
--- a/SOURCES/bz1315371-03-improve-alerts-help.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From a315196a5f9fc70ce1cd4b56648f262048bb93f1 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Mon, 25 Jul 2016 14:10:55 +0200
-Subject: [PATCH] improve alerts help
-
----
- pcs/pcs.8    | 8 ++++----
- pcs/usage.py | 8 ++++----
- 2 files changed, 8 insertions(+), 8 deletions(-)
-
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index f789df7..0e8e967 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -667,16 +667,16 @@ Add specified utilization options to specified node.  If node is not specified,
- Show all configured alerts.
- .TP
- create path=<path> [id=<alert\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
--Create new alert with specified path. Id will be automatically generated if it is not specified.
-+Define an alert handler with specified path. Id will be automatically generated if it is not specified.
- .TP
- update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
--Update existing alert with specified id.
-+Update existing alert handler with specified id.
- .TP
- remove <alert\-id>
--Remove alert with specified id.
-+Remove alert handler with specified id.
- .TP
- recipient add <alert\-id> <recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
--Add new recipient to specified alert.
-+Add new recipient to specified alert handler.
- .TP
- recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
- Update existing recipient identified by it's id.
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 2f8f855..7cfb33e 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1416,20 +1416,20 @@ Commands:
- 
-     create path=<path> [id=<alert-id>] [description=<description>]
-             [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
--        Create new alert with specified path. Id will be automatically
-+        Define an alert handler with specified path. Id will be automatically
-         generated if it is not specified.
- 
-     update <alert-id> [path=<path>] [description=<description>]
-             [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
--        Update existing alert with specified id.
-+        Update existing alert handler with specified id.
- 
-     remove <alert-id>
--        Remove alert with specified id.
-+        Remove alert handler with specified id.
- 
-     recipient add <alert-id> <recipient-value> [id=<recipient-id>]
-             [description=<description>] [options [<option>=<value>]...]
-             [meta [<meta-option>=<value>]...]
--        Add new recipient to specified alert.
-+        Add new recipient to specified alert handler.
- 
-     recipient update <recipient-id> [value=<recipient-value>]
-             [description=<description>] [options [<option>=<value>]...]
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1315371-04-alerts-related-fixes.patch b/SOURCES/bz1315371-04-alerts-related-fixes.patch
deleted file mode 100644
index 9c8e1ec..0000000
--- a/SOURCES/bz1315371-04-alerts-related-fixes.patch
+++ /dev/null
@@ -1,373 +0,0 @@
-From b438fe5c0eb4e6fa738e21287540c0d8f6b91c68 Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Fri, 19 Aug 2016 02:57:39 +0200
-Subject: [PATCH] squash bz1315371 [RFE] Provide configurable alerts
-
-25a25c534ff6 show help when unknown subcommand of 'pcs alert recipient' was given
-
-c352ce184093 make syntax of command 'pcs alert recipient add' more consistent
-
-8c6ec586d57c fix error handling when upgrading cib schema
----
- pcs/alert.py                   |  9 +++--
- pcs/lib/cib/tools.py           | 36 +++++++++--------
- pcs/pcs.8                      |  2 +-
- pcs/test/test_alert.py         | 44 +++++++++++++-------
- pcs/test/test_lib_cib_tools.py | 91 ++++++++++++++++++++++++++++++++++++++++++
- pcs/usage.py                   |  2 +-
- 6 files changed, 147 insertions(+), 37 deletions(-)
-
-diff --git a/pcs/alert.py b/pcs/alert.py
-index 693bb8d..17f4e8d 100644
---- a/pcs/alert.py
-+++ b/pcs/alert.py
-@@ -63,6 +63,8 @@ def recipient_cmd(*args):
-             recipient_update(*args)
-         elif sub_cmd == "remove":
-             recipient_remove(*args)
-+        else:
-+            raise CmdLineInputError()
-     except CmdLineInputError as e:
-         utils.exit_on_cmdline_input_errror(
-             e, "alert", "recipient {0}".format(sub_cmd)
-@@ -127,15 +129,14 @@ def recipient_add(lib, argv, modifiers):
-         raise CmdLineInputError()
- 
-     alert_id = argv[0]
--    recipient_value = argv[1]
- 
--    sections = parse_cmd_sections(argv[2:], set(["options", "meta"]))
-+    sections = parse_cmd_sections(argv[1:], set(["options", "meta"]))
-     main_args = prepare_options(sections["main"])
--    ensure_only_allowed_options(main_args, ["description", "id"])
-+    ensure_only_allowed_options(main_args, ["description", "id", "value"])
- 
-     lib.alert.add_recipient(
-         alert_id,
--        recipient_value,
-+        main_args.get("value", None),
-         prepare_options(sections["options"]),
-         prepare_options(sections["meta"]),
-         recipient_id=main_args.get("id", None),
-diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
-index d8ce57a..8141360 100644
---- a/pcs/lib/cib/tools.py
-+++ b/pcs/lib/cib/tools.py
-@@ -176,29 +176,31 @@ def upgrade_cib(cib, runner):
-     cib -- cib etree
-     runner -- CommandRunner
-     """
--    temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
--    temp_file.write(etree.tostring(cib).decode())
--    temp_file.flush()
--    output, retval = runner.run(
--        [
--            os.path.join(settings.pacemaker_binaries, "cibadmin"),
--            "--upgrade",
--            "--force"
--        ],
--        env_extend={"CIB_file": temp_file.name}
--    )
-+    temp_file = None
-+    try:
-+        temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
-+        temp_file.write(etree.tostring(cib).decode())
-+        temp_file.flush()
-+        output, retval = runner.run(
-+            [
-+                os.path.join(settings.pacemaker_binaries, "cibadmin"),
-+                "--upgrade",
-+                "--force"
-+            ],
-+            env_extend={"CIB_file": temp_file.name}
-+        )
- 
--    if retval != 0:
--        temp_file.close()
--        LibraryError(reports.cib_upgrade_failed(output))
-+        if retval != 0:
-+            temp_file.close()
-+            raise LibraryError(reports.cib_upgrade_failed(output))
- 
--    try:
-         temp_file.seek(0)
-         return etree.fromstring(temp_file.read())
-     except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
--        LibraryError(reports.cib_upgrade_failed(str(e)))
-+        raise LibraryError(reports.cib_upgrade_failed(str(e)))
-     finally:
--        temp_file.close()
-+        if temp_file:
-+            temp_file.close()
- 
- 
- def ensure_cib_version(runner, cib, version):
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 7a054ca..b3c4877 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -727,7 +727,7 @@ Update existing alert handler with specified id.
- remove <alert\-id>
- Remove alert handler with specified id.
- .TP
--recipient add <alert\-id> <recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-+recipient add <alert\-id> value=<recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
- Add new recipient to specified alert handler.
- .TP
- recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
-index f6ea70d..d919ff6 100644
---- a/pcs/test/test_alert.py
-+++ b/pcs/test/test_alert.py
-@@ -233,7 +233,7 @@ Alerts:
-  Alert: alert (path=test)
- """
-         )
--        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success("alert recipient add alert value=rec_value")
-         self.assert_pcs_success(
-             "alert config",
-             """\
-@@ -244,7 +244,7 @@ Alerts:
- """
-         )
-         self.assert_pcs_success(
--            "alert recipient add alert rec_value2 id=my-recipient "
-+            "alert recipient add alert value=rec_value2 id=my-recipient "
-             "description=description options o1=1 o2=2 meta m1=v1 m2=v2"
-         )
-         self.assert_pcs_success(
-@@ -263,21 +263,25 @@ Alerts:
- 
-     def test_already_exists(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-+        self.assert_pcs_success(
-+            "alert recipient add alert value=rec_value id=rec"
-+        )
-         self.assert_pcs_fail(
--            "alert recipient add alert value id=rec",
-+            "alert recipient add alert value=value id=rec",
-             "Error: 'rec' already exists\n"
-         )
-         self.assert_pcs_fail(
--            "alert recipient add alert value id=alert",
-+            "alert recipient add alert value=value id=alert",
-             "Error: 'alert' already exists\n"
-         )
- 
-     def test_same_value(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-+        self.assert_pcs_success(
-+            "alert recipient add alert value=rec_value id=rec"
-+        )
-         self.assert_pcs_fail(
--            "alert recipient add alert rec_value",
-+            "alert recipient add alert value=rec_value",
-             "Error: Recipient 'rec_value' in alert 'alert' already exists, "
-             "use --force to override\n"
-         )
-@@ -291,7 +295,7 @@ Alerts:
- """
-         )
-         self.assert_pcs_success(
--            "alert recipient add alert rec_value --force",
-+            "alert recipient add alert value=rec_value --force",
-             "Warning: Recipient 'rec_value' in alert 'alert' already exists\n"
-         )
-         self.assert_pcs_success(
-@@ -305,13 +309,21 @@ Alerts:
- """
-         )
- 
-+    def test_no_value(self):
-+        self.assert_pcs_success("alert create path=test")
-+        self.assert_pcs_fail(
-+            "alert recipient add alert id=rec",
-+            "Error: required option 'value' is missing\n"
-+        )
-+
-+
- 
- @unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
- class UpdateRecipientAlert(PcsAlertTest):
-     def test_success(self):
-         self.assert_pcs_success("alert create path=test")
-         self.assert_pcs_success(
--            "alert recipient add alert rec_value description=description "
-+            "alert recipient add alert value=rec_value description=description "
-             "options o1=1 o2=2 meta m1=v1 m2=v2"
-         )
-         self.assert_pcs_success(
-@@ -360,8 +372,8 @@ Alerts:
- 
-     def test_value_exists(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value")
--        self.assert_pcs_success("alert recipient add alert value")
-+        self.assert_pcs_success("alert recipient add alert value=rec_value")
-+        self.assert_pcs_success("alert recipient add alert value=value")
-         self.assert_pcs_success(
-             "alert config",
-             """\
-@@ -394,7 +406,7 @@ Alerts:
- 
-     def test_value_same_as_previous(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value")
-+        self.assert_pcs_success("alert recipient add alert value=rec_value")
-         self.assert_pcs_success(
-             "alert config",
-             """\
-@@ -425,7 +437,9 @@ Alerts:
- 
-     def test_empty_value(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-+        self.assert_pcs_success(
-+            "alert recipient add alert value=rec_value id=rec"
-+        )
-         self.assert_pcs_fail(
-             "alert recipient update rec value=",
-             "Error: Recipient value '' is not valid.\n"
-@@ -436,7 +450,9 @@ Alerts:
- class RemoveRecipientTest(PcsAlertTest):
-     def test_success(self):
-         self.assert_pcs_success("alert create path=test")
--        self.assert_pcs_success("alert recipient add alert rec_value id=rec")
-+        self.assert_pcs_success(
-+            "alert recipient add alert value=rec_value id=rec"
-+        )
-         self.assert_pcs_success(
-             "alert config",
-             """\
-diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
-index 10f8a96..0fd4d22 100644
---- a/pcs/test/test_lib_cib_tools.py
-+++ b/pcs/test/test_lib_cib_tools.py
-@@ -7,6 +7,7 @@ from __future__ import (
- 
- from unittest import TestCase
- 
-+from os.path import join
- from lxml import etree
- 
- from pcs.test.tools.assertions import (
-@@ -17,6 +18,7 @@ from pcs.test.tools.misc import get_test_resource as rc
- from pcs.test.tools.pcs_mock import mock
- from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
- 
-+from pcs import settings
- from pcs.common import report_codes
- from pcs.lib.external import CommandRunner
- from pcs.lib.errors import ReportItemSeverity as severities
-@@ -369,3 +371,92 @@ class EnsureCibVersionTest(TestCase):
-             )
-         )
-         mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
-+
-+
-+@mock.patch("tempfile.NamedTemporaryFile")
-+class UpgradeCibTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+
-+    def test_success(self, mock_named_file):
-+        mock_file = mock.MagicMock()
-+        mock_file.name = "mock_file_name"
-+        mock_file.read.return_value = "<cib/>"
-+        mock_named_file.return_value = mock_file
-+        self.mock_runner.run.return_value = ("", 0)
-+        assert_xml_equal(
-+            "<cib/>",
-+            etree.tostring(
-+                lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner)
-+            ).decode()
-+        )
-+        mock_named_file.assert_called_once_with("w+", suffix=".pcs")
-+        mock_file.write.assert_called_once_with("<old_cib/>")
-+        mock_file.flush.assert_called_once_with()
-+        self.mock_runner.run.assert_called_once_with(
-+            [
-+                join(settings.pacemaker_binaries, "cibadmin"),
-+                "--upgrade",
-+                "--force"
-+            ],
-+            env_extend={"CIB_file": "mock_file_name"}
-+        )
-+        mock_file.seek.assert_called_once_with(0)
-+        mock_file.read.assert_called_once_with()
-+
-+    def test_upgrade_failed(self, mock_named_file):
-+        mock_file = mock.MagicMock()
-+        mock_file.name = "mock_file_name"
-+        mock_named_file.return_value = mock_file
-+        self.mock_runner.run.return_value = ("reason", 1)
-+        assert_raise_library_error(
-+            lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_UPGRADE_FAILED,
-+                {"reason": "reason"}
-+            )
-+        )
-+        mock_named_file.assert_called_once_with("w+", suffix=".pcs")
-+        mock_file.write.assert_called_once_with("<old_cib/>")
-+        mock_file.flush.assert_called_once_with()
-+        self.mock_runner.run.assert_called_once_with(
-+            [
-+                join(settings.pacemaker_binaries, "cibadmin"),
-+                "--upgrade",
-+                "--force"
-+            ],
-+            env_extend={"CIB_file": "mock_file_name"}
-+        )
-+
-+    def test_unable_to_parse_upgraded_cib(self, mock_named_file):
-+        mock_file = mock.MagicMock()
-+        mock_file.name = "mock_file_name"
-+        mock_file.read.return_value = "not xml"
-+        mock_named_file.return_value = mock_file
-+        self.mock_runner.run.return_value = ("", 0)
-+        assert_raise_library_error(
-+            lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
-+            (
-+                severities.ERROR,
-+                report_codes.CIB_UPGRADE_FAILED,
-+                {
-+                    "reason":
-+                        "Start tag expected, '<' not found, line 1, column 1",
-+                }
-+            )
-+        )
-+        mock_named_file.assert_called_once_with("w+", suffix=".pcs")
-+        mock_file.write.assert_called_once_with("<old_cib/>")
-+        mock_file.flush.assert_called_once_with()
-+        self.mock_runner.run.assert_called_once_with(
-+            [
-+                join(settings.pacemaker_binaries, "cibadmin"),
-+                "--upgrade",
-+                "--force"
-+            ],
-+            env_extend={"CIB_file": "mock_file_name"}
-+        )
-+        mock_file.seek.assert_called_once_with(0)
-+        mock_file.read.assert_called_once_with()
-+
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 9ebbca9..78e340b 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1507,7 +1507,7 @@ Commands:
-     remove <alert-id>
-         Remove alert handler with specified id.
- 
--    recipient add <alert-id> <recipient-value> [id=<recipient-id>]
-+    recipient add <alert-id> value=<recipient-value> [id=<recipient-id>]
-             [description=<description>] [options [<option>=<value>]...]
-             [meta [<meta-option>=<value>]...]
-         Add new recipient to specified alert handler.
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1327739-01-add-pcs-quorum-expected-votes-command.patch b/SOURCES/bz1327739-01-add-pcs-quorum-expected-votes-command.patch
deleted file mode 100644
index ce9052f..0000000
--- a/SOURCES/bz1327739-01-add-pcs-quorum-expected-votes-command.patch
+++ /dev/null
@@ -1,311 +0,0 @@
-From bd852905ad905b83daa1a7240e7a79c3357db5b8 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Thu, 30 Jun 2016 15:09:31 +0200
-Subject: [PATCH] bz1158805-01 add "pcs quorum expected-votes" command
-
----
- pcs/cli/common/lib_wrapper.py        |  1 +
- pcs/common/report_codes.py           |  1 +
- pcs/lib/commands/quorum.py           | 21 ++++++++++++++++++
- pcs/lib/corosync/live.py             | 15 +++++++++++++
- pcs/lib/reports.py                   | 13 +++++++++++
- pcs/pcs.8                            |  3 +++
- pcs/quorum.py                        |  7 ++++++
- pcs/test/suite.py                    |  6 +++--
- pcs/test/test_lib_commands_quorum.py | 43 ++++++++++++++++++++++++++++++++++++
- pcs/test/test_lib_corosync_live.py   | 42 +++++++++++++++++++++++++++++++++++
- pcs/usage.py                         |  4 ++++
- 11 files changed, 154 insertions(+), 2 deletions(-)
-
-diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
-index 2dd5810..c4b8342 100644
---- a/pcs/cli/common/lib_wrapper.py
-+++ b/pcs/cli/common/lib_wrapper.py
-@@ -116,6 +116,7 @@ def load_module(env, middleware_factory, name):
-                 "add_device": quorum.add_device,
-                 "get_config": quorum.get_config,
-                 "remove_device": quorum.remove_device,
-+                "set_expected_votes_live": quorum.set_expected_votes_live,
-                 "set_options": quorum.set_options,
-                 "status": quorum.status_text,
-                 "status_device": quorum.status_device_text,
-diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
-index afe0554..2b39938 100644
---- a/pcs/common/report_codes.py
-+++ b/pcs/common/report_codes.py
-@@ -47,6 +47,7 @@ COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR"
- COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE"
- COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE"
- COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
-+COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR"
- COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
- CRM_MON_ERROR = "CRM_MON_ERROR"
- DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
-diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
-index aa00bbd..7425e78 100644
---- a/pcs/lib/commands/quorum.py
-+++ b/pcs/lib/commands/quorum.py
-@@ -314,6 +314,27 @@ def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
-         skip_offline_nodes
-     )
- 
-+def set_expected_votes_live(lib_env, expected_votes):
-+    """
-+    set expected votes in live cluster to specified value
-+    numeric expected_votes desired value of expected votes
-+    """
-+    if lib_env.is_cman_cluster:
-+        raise LibraryError(reports.cman_unsupported_command())
-+
-+    try:
-+        votes_int = int(expected_votes)
-+        if votes_int < 1:
-+            raise ValueError()
-+    except ValueError:
-+        raise LibraryError(reports.invalid_option_value(
-+            "expected votes",
-+            expected_votes,
-+            "positive integer"
-+        ))
-+
-+    corosync_live.set_expected_votes(lib_env.cmd_runner(), votes_int)
-+
- def __ensure_not_cman(lib_env):
-     if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster:
-         raise LibraryError(reports.cman_unsupported_command())
-diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
-index 4129aeb..b49b9f6 100644
---- a/pcs/lib/corosync/live.py
-+++ b/pcs/lib/corosync/live.py
-@@ -62,3 +62,18 @@ def get_quorum_status_text(runner):
-             reports.corosync_quorum_get_status_error(output)
-         )
-     return output
-+
-+def set_expected_votes(runner, votes):
-+    """
-+    set expected votes in live cluster to specified value
-+    """
-+    output, retval = runner.run([
-+        os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
-+        # format votes to handle the case where they are int
-+        "-e", "{0}".format(votes)
-+    ])
-+    if retval != 0:
-+        raise LibraryError(
-+            reports.corosync_quorum_set_expected_votes_error(output)
-+        )
-+    return output
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index d8f88cd..9ececf9 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -565,6 +565,19 @@ def corosync_quorum_get_status_error(reason):
-         }
-     )
- 
-+def corosync_quorum_set_expected_votes_error(reason):
-+    """
-+    unable to set expcted votes in a live cluster
-+    string reason an error message
-+    """
-+    return ReportItem.error(
-+        report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR,
-+        "Unable to set expected votes: {reason}",
-+        info={
-+            "reason": reason,
-+        }
-+    )
-+
- def corosync_config_reloaded():
-     """
-     corosync configuration has been reloaded
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 949d918..a436b4c 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -563,6 +563,9 @@ Add/Change quorum device options.  Generic options and model options are all doc
- 
- WARNING: If you want to change "host" option of qdevice model net, use "pcs quorum device remove" and "pcs quorum device add" commands to set up configuration properly unless old and new host is the same machine.
- .TP
-+expected\-votes <votes>
-+Set expected votes in the live cluster to specified value.  This only affects the live cluster, not changes any configuration files.
-+.TP
- unblock [\fB\-\-force\fR]
- Cancel waiting for all nodes when establishing quorum.  Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless.  This command should ONLY be used when nodes which the cluster is waiting for have been confirmed to be powered off and to have no access to shared resources.
- 
-diff --git a/pcs/quorum.py b/pcs/quorum.py
-index 27085ac..2d54ed7 100644
---- a/pcs/quorum.py
-+++ b/pcs/quorum.py
-@@ -28,6 +28,8 @@ def quorum_cmd(lib, argv, modificators):
-             usage.quorum(argv)
-         elif sub_cmd == "config":
-             quorum_config_cmd(lib, argv_next, modificators)
-+        elif sub_cmd == "expected-votes":
-+            quorum_expected_votes_cmd(lib, argv_next, modificators)
-         elif sub_cmd == "status":
-             quorum_status_cmd(lib, argv_next, modificators)
-         elif sub_cmd == "device":
-@@ -101,6 +103,11 @@ def quorum_config_to_str(config):
- 
-     return lines
- 
-+def quorum_expected_votes_cmd(lib, argv, modificators):
-+    if len(argv) != 1:
-+        raise CmdLineInputError()
-+    lib.quorum.set_expected_votes_live(argv[0])
-+
- def quorum_status_cmd(lib, argv, modificators):
-     if argv:
-         raise CmdLineInputError()
-diff --git a/pcs/test/suite.py b/pcs/test/suite.py
-index 85dd20c..5b29918 100755
---- a/pcs/test/suite.py
-+++ b/pcs/test/suite.py
-@@ -74,7 +74,7 @@ def run_tests(tests, verbose=False, color=False):
-         verbosity=2 if verbose else 1,
-         resultclass=resultclass
-     )
--    testRunner.run(tests)
-+    return testRunner.run(tests)
- 
- put_package_to_path()
- explicitly_enumerated_tests = [
-@@ -85,7 +85,7 @@ explicitly_enumerated_tests = [
-         "--all-but",
-     )
- ]
--run_tests(
-+test_result = run_tests(
-     discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv),
-     verbose="-v" in sys.argv,
-     color=(
-@@ -99,6 +99,8 @@ run_tests(
-         )
-     ),
- )
-+if not test_result.wasSuccessful():
-+    sys.exit(1)
- 
- # assume that we are in pcs root dir
- #
-diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
-index e824f37..c12ab66 100644
---- a/pcs/test/test_lib_commands_quorum.py
-+++ b/pcs/test/test_lib_commands_quorum.py
-@@ -1750,3 +1750,46 @@ class UpdateDeviceTest(TestCase, CmanMixin):
-                 "model: net\n        bad_option: bad_value"
-             )
-         )
-+
-+
-+@mock.patch("pcs.lib.commands.quorum.corosync_live.set_expected_votes")
-+@mock.patch.object(
-+    LibraryEnvironment,
-+    "cmd_runner",
-+    lambda self: "mock_runner"
-+)
-+class SetExpectedVotesLiveTest(TestCase, CmanMixin):
-+    def setUp(self):
-+        self.mock_logger = mock.MagicMock(logging.Logger)
-+        self.mock_reporter = MockLibraryReportProcessor()
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-+    def test_disabled_on_cman(self, mock_set_votes):
-+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        self.assert_disabled_on_cman(
-+            lambda: lib.set_expected_votes_live(lib_env, "5")
-+        )
-+        mock_set_votes.assert_not_called()
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_success(self, mock_set_votes):
-+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        lib.set_expected_votes_live(lib_env, "5")
-+        mock_set_votes.assert_called_once_with("mock_runner", 5)
-+
-+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-+    def test_invalid_votes(self, mock_set_votes):
-+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-+        assert_raise_library_error(
-+            lambda: lib.set_expected_votes_live(lib_env, "-5"),
-+            (
-+                severity.ERROR,
-+                report_codes.INVALID_OPTION_VALUE,
-+                {
-+                    "option_name": "expected votes",
-+                    "option_value": "-5",
-+                    "allowed_values": "positive integer",
-+                }
-+            )
-+        )
-+        mock_set_votes.assert_not_called()
-diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
-index 96fe235..0fc5eb2 100644
---- a/pcs/test/test_lib_corosync_live.py
-+++ b/pcs/test/test_lib_corosync_live.py
-@@ -141,3 +141,45 @@ class GetQuorumStatusTextTest(TestCase):
-         self.mock_runner.run.assert_called_once_with([
-             self.quorum_tool, "-p"
-         ])
-+
-+
-+class SetExpectedVotesTest(TestCase):
-+    def setUp(self):
-+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+
-+    def path(self, name):
-+        return os.path.join(settings.corosync_binaries, name)
-+
-+    def test_success(self):
-+        cmd_retval = 0
-+        cmd_output = "cmd output"
-+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        mock_runner.run.return_value = (cmd_output, cmd_retval)
-+
-+        lib.set_expected_votes(mock_runner, 3)
-+
-+        mock_runner.run.assert_called_once_with([
-+            self.path("corosync-quorumtool"), "-e", "3"
-+        ])
-+
-+    def test_error(self):
-+        cmd_retval = 1
-+        cmd_output = "cmd output"
-+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-+        mock_runner.run.return_value = (cmd_output, cmd_retval)
-+
-+        assert_raise_library_error(
-+            lambda: lib.set_expected_votes(mock_runner, 3),
-+            (
-+                severity.ERROR,
-+                report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR,
-+                {
-+                    "reason": cmd_output,
-+                }
-+            )
-+        )
-+
-+        mock_runner.run.assert_called_once_with([
-+            self.path("corosync-quorumtool"), "-e", "3"
-+        ])
-+
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 542f806..ee53a2f 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1352,6 +1352,10 @@ Commands:
-         to set up configuration properly unless old and new host is the same
-         machine.
- 
-+    expected-votes <votes>
-+        Set expected votes in the live cluster to specified value.  This only
-+        affects the live cluster, not changes any configuration files.
-+
-     unblock [--force]
-         Cancel waiting for all nodes when establishing quorum.  Useful in
-         situations where you know the cluster is inquorate, but you are
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch b/SOURCES/bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch
deleted file mode 100644
index 0a4e1cf..0000000
--- a/SOURCES/bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch
+++ /dev/null
@@ -1,278 +0,0 @@
-From 6805a235de50925ed7f30ac79b3d96be3f5d71df Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Mon, 25 Jul 2016 14:23:23 +0200
-Subject: [PATCH 1/2] remove dead code
-
-Function resource_master_remove could not ever be called because conditions
-with xpath queries were never True. In the case when the resource_id was
-an id of a master resource, it got changed to the id of the master's child
-right at the beginning of resource_remove function.
----
- pcs/resource.py | 49 ++++++-------------------------------------------
- 1 file changed, 6 insertions(+), 43 deletions(-)
-
-diff --git a/pcs/resource.py b/pcs/resource.py
-index 9384a21..24128ba 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -1618,45 +1618,9 @@ def resource_master_create(dom, argv, update=False, master_id=None):
- 
-     return dom, master_element.getAttribute("id")
- 
--def resource_master_remove(argv):
--    if len(argv) < 1:
--        usage.resource()
--        sys.exit(1)
--
--    dom = utils.get_cib_dom()
--    master_id = argv.pop(0)
--
--    master_found = False
--# Check to see if there's a resource/group with the master_id if so, we remove the parent
--    for rg in (dom.getElementsByTagName("primitive") + dom.getElementsByTagName("group")):
--        if rg.getAttribute("id") == master_id and rg.parentNode.tagName == "master":
--            master_id = rg.parentNode.getAttribute("id")
--
--    resources_to_cleanup = []
--    for master in dom.getElementsByTagName("master"):
--        if master.getAttribute("id") == master_id:
--            childNodes = master.getElementsByTagName("primitive")
--            for child in childNodes:
--                resources_to_cleanup.append(child.getAttribute("id"))
--            master_found = True
--            break
--
--    if not master_found:
--        utils.err("Unable to find multi-state resource with id %s" % master_id)
--
--    constraints_element = dom.getElementsByTagName("constraints")
--    if len(constraints_element) > 0:
--        constraints_element = constraints_element[0]
--        for resource_id in resources_to_cleanup:
--            remove_resource_references(
--                dom, resource_id, constraints_element=constraints_element
--            )
--    master.parentNode.removeChild(master)
--    print("Removing Master - " + master_id)
--    utils.replace_cib_configuration(dom)
--
- def resource_remove(resource_id, output = True):
-     dom = utils.get_cib_dom()
-+    # if resource is a clone or a master, work with its child instead
-     cloned_resource = utils.dom_get_clone_ms_resource(dom, resource_id)
-     if cloned_resource:
-         resource_id = cloned_resource.getAttribute("id")
-@@ -1704,16 +1668,15 @@ def resource_remove(resource_id, output = True):
-             resource_remove(res.getAttribute("id"))
-         sys.exit(0)
- 
-+    # now we know resource is not a group, a clone nor a master
-+    # because of the conditions above
-+    if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'):
-+        utils.err("Resource '{0}' does not exist.".format(resource_id))
-+
-     group_xpath = '//group/primitive[@id="'+resource_id+'"]/..'
-     group = utils.get_cib_xpath(group_xpath)
-     num_resources_in_group = 0
- 
--    if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'):
--        if utils.does_exist('//resources/master[@id="'+resource_id+'"]'):
--            return resource_master_remove([resource_id])
--
--        utils.err("Resource '{0}' does not exist.".format(resource_id))
--
-     if (group != ""):
-         num_resources_in_group = len(parseString(group).documentElement.getElementsByTagName("primitive"))
- 
--- 
-1.8.3.1
-
-
-From cd96c34c7ad1f4f767c0d14475b683a70c3b0862 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Mon, 25 Jul 2016 17:34:35 +0200
-Subject: [PATCH 2/2] when removing a remote node remove it from pacemaker's
- caches as well
-
----
- pcs/cluster.py         |  6 +++++
- pcs/resource.py        | 64 ++++++++++++++++++++++++++++----------------------
- pcs/test/test_utils.py |  9 +++++++
- pcs/utils.py           |  8 +++++++
- 4 files changed, 59 insertions(+), 28 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 7a8615d..1c3b425 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1922,6 +1922,12 @@ def cluster_remote_node(argv):
-             nvpair.parentNode.removeChild(nvpair)
-         dom = constraint.remove_constraints_containing_node(dom, hostname)
-         utils.replace_cib_configuration(dom)
-+        if not utils.usefile:
-+            output, retval = utils.run([
-+                "crm_node", "--force", "--remove", hostname
-+            ])
-+            if retval != 0:
-+                utils.err("unable to remove: {0}".fomat(output))
-     else:
-         usage.cluster(["remote-node"])
-         sys.exit(1)
-diff --git a/pcs/resource.py b/pcs/resource.py
-index 24128ba..a85f46f 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -929,31 +929,11 @@ def resource_update(res_id,args):
-             ia.setAttribute("value", val)
-             instance_attributes.appendChild(ia)
- 
--    meta_attributes = resource.getElementsByTagName("meta_attributes")
--    if len(meta_attributes) == 0:
--        meta_attributes = dom.createElement("meta_attributes")
--        meta_attributes.setAttribute("id", res_id + "-meta_attributes")
--        resource.appendChild(meta_attributes)
--    else:
--        meta_attributes = meta_attributes[0]
--
--    meta_attrs = utils.convert_args_to_tuples(meta_values)
--    for (key,val) in meta_attrs:
--        meta_found = False
--        for ma in meta_attributes.getElementsByTagName("nvpair"):
--            if ma.getAttribute("name") == key:
--                meta_found = True
--                if val == "":
--                    meta_attributes.removeChild(ma)
--                else:
--                    ma.setAttribute("value", val)
--                break
--        if not meta_found:
--            ma = dom.createElement("nvpair")
--            ma.setAttribute("id", res_id + "-meta_attributes-" + key)
--            ma.setAttribute("name", key)
--            ma.setAttribute("value", val)
--            meta_attributes.appendChild(ma)
-+    remote_node_name = utils.dom_get_resource_remote_node_name(resource)
-+    utils.dom_update_meta_attr(
-+        resource,
-+        utils.convert_args_to_tuples(meta_values)
-+    )
- 
-     operations = resource.getElementsByTagName("operations")
-     if len(operations) == 0:
-@@ -1005,6 +985,17 @@ def resource_update(res_id,args):
- 
-     utils.replace_cib_configuration(dom)
- 
-+    if (
-+        remote_node_name
-+        and
-+        remote_node_name != utils.dom_get_resource_remote_node_name(resource)
-+    ):
-+        # if the resource was a remote node and it is not anymore, (or its name
-+        # changed) we need to tell pacemaker about it
-+        output, retval = utils.run([
-+            "crm_node", "--force", "--remove", remote_node_name
-+        ])
-+
-     if "--wait" in utils.pcs_options:
-         args = ["crm_resource", "--wait"]
-         if wait_timeout:
-@@ -1231,10 +1222,22 @@ def resource_meta(res_id, argv):
-     if "--wait" in utils.pcs_options:
-         wait_timeout = utils.validate_wait_get_timeout()
- 
-+    remote_node_name = utils.dom_get_resource_remote_node_name(resource_el)
-     utils.dom_update_meta_attr(resource_el, utils.convert_args_to_tuples(argv))
- 
-     utils.replace_cib_configuration(dom)
- 
-+    if (
-+        remote_node_name
-+        and
-+        remote_node_name != utils.dom_get_resource_remote_node_name(resource_el)
-+    ):
-+        # if the resource was a remote node and it is not anymore, (or its name
-+        # changed) we need to tell pacemaker about it
-+        output, retval = utils.run([
-+            "crm_node", "--force", "--remove", remote_node_name
-+        ])
-+
-     if "--wait" in utils.pcs_options:
-         args = ["crm_resource", "--wait"]
-         if wait_timeout:
-@@ -1714,11 +1717,12 @@ def resource_remove(resource_id, output = True):
-     )
-     dom = utils.get_cib_dom()
-     resource_el = utils.dom_get_resource(dom, resource_id)
-+    remote_node_name = None
-     if resource_el:
--        remote_node = utils.dom_get_resource_remote_node_name(resource_el)
--        if remote_node:
-+        remote_node_name = utils.dom_get_resource_remote_node_name(resource_el)
-+        if remote_node_name:
-             dom = constraint.remove_constraints_containing_node(
--                dom, remote_node, output
-+                dom, remote_node_name, output
-             )
-             utils.replace_cib_configuration(dom)
-             dom = utils.get_cib_dom()
-@@ -1784,6 +1788,10 @@ def resource_remove(resource_id, output = True):
-             if output == True:
-                 utils.err("Unable to remove resource '%s' (do constraints exist?)" % (resource_id))
-             return False
-+    if remote_node_name and not utils.usefile:
-+        output, retval = utils.run([
-+            "crm_node", "--force", "--remove", remote_node_name
-+        ])
-     return True
- 
- def stonith_level_rm_device(cib_dom, stn_id):
-diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
-index 819f8ee..192048e 100644
---- a/pcs/test/test_utils.py
-+++ b/pcs/test/test_utils.py
-@@ -273,6 +273,9 @@ class UtilsTest(unittest.TestCase):
-                             name="remote-node" value="guest2"/>
-                     </instance_attributes>
-                 </primitive>
-+                <primitive id="dummy3"
-+                        class="ocf" provider="pacemaker" type="remote">
-+                </primitive>
-             </resources>
-         """).documentElement
-         resources = dom.getElementsByTagName("resources")[0]
-@@ -296,6 +299,12 @@ class UtilsTest(unittest.TestCase):
-                 utils.dom_get_resource(dom, "vm-guest1")
-             )
-         )
-+        self.assertEqual(
-+            "dummy3",
-+            utils.dom_get_resource_remote_node_name(
-+                utils.dom_get_resource(dom, "dummy3")
-+            )
-+        )
- 
-     def test_dom_get_meta_attr_value(self):
-         dom = self.get_cib_empty()
-diff --git a/pcs/utils.py b/pcs/utils.py
-index a7ed975..25274dc 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1252,6 +1252,14 @@ def validate_constraint_resource(dom, resource_id):
- def dom_get_resource_remote_node_name(dom_resource):
-     if dom_resource.tagName != "primitive":
-         return None
-+    if (
-+        dom_resource.getAttribute("class").lower() == "ocf"
-+        and
-+        dom_resource.getAttribute("provider").lower() == "pacemaker"
-+        and
-+        dom_resource.getAttribute("type").lower() == "remote"
-+    ):
-+        return dom_resource.getAttribute("id")
-     return dom_get_meta_attr_value(dom_resource, "remote-node")
- 
- def dom_get_meta_attr_value(dom_resource, meta_name):
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1346852-01-fix-bad-request-when-resource-removal-t.patch b/SOURCES/bz1346852-01-fix-bad-request-when-resource-removal-t.patch
deleted file mode 100644
index ae01e91..0000000
--- a/SOURCES/bz1346852-01-fix-bad-request-when-resource-removal-t.patch
+++ /dev/null
@@ -1,315 +0,0 @@
-From 4949b387cbec0b79976ca87fbde41e441c21c197 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Mon, 27 Jun 2016 11:49:43 +0200
-Subject: [PATCH] bz1346852-01-fix bad request when resource removal takes
- longer than pcs expects
-
----
- pcs/cluster.py          | 19 +++++++++++--
- pcs/pcs.8               |  4 +--
- pcs/resource.py         |  3 ++-
- pcs/settings_default.py |  1 +
- pcs/usage.py            |  5 ++--
- pcsd/remote.rb          | 71 ++++++++++++++++++++++++++++++++++++++++++-------
- pcsd/views/main.erb     | 13 ++++++---
- pcsd/views/nodes.erb    | 14 +++++-----
- 8 files changed, 102 insertions(+), 28 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 988ab75..9d4798c 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1171,6 +1171,9 @@ def cluster_push(argv):
- 
-     filename = None
-     scope = None
-+    timeout = None
-+    if "--wait" in utils.pcs_options:
-+        timeout = utils.validate_wait_get_timeout()
-     for arg in argv:
-         if "=" not in arg:
-             filename = arg
-@@ -1206,8 +1209,20 @@ def cluster_push(argv):
-     output, retval = utils.run(command)
-     if retval != 0:
-         utils.err("unable to push cib\n" + output)
--    else:
--        print("CIB updated")
-+    print("CIB updated")
-+    if "--wait" not in utils.pcs_options:
-+        return
-+    cmd = ["crm_resource", "--wait"]
-+    if timeout:
-+        cmd.extend(["--timeout", timeout])
-+    output, retval = utils.run(cmd)
-+    if retval != 0:
-+        msg = []
-+        if retval == settings.pacemaker_wait_timeout_status:
-+            msg.append("waiting timeout")
-+        if output:
-+            msg.append("\n" + output)
-+        utils.err("\n".join(msg).strip())
- 
- def cluster_edit(argv):
-     if 'EDITOR' in os.environ:
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index a72a9bd..949d918 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -259,8 +259,8 @@ Sync corosync configuration to all nodes found from current corosync.conf file (
- cib [filename] [scope=<scope> | \fB\-\-config\fR]
- Get the raw xml from the CIB (Cluster Information Base).  If a filename is provided, we save the CIB to that file, otherwise the CIB is printed.  Specify scope to get a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status.  \fB\-\-config\fR is the same as scope=configuration.  Do not specify a scope if you want to edit the saved CIB using pcs (pcs -f <command>).
- .TP
--cib-push <filename> [scope=<scope> | \fB\-\-config\fR]
--Push the raw xml from <filename> to the CIB (Cluster Information Base).  You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push.  Specify scope to push a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to push the whole CIB or be warned in the case of outdated CIB.  WARNING: the selected scope of the CIB will be overwritten by the current content of the specified file.
-+cib-push <filename> [scope=<scope> | \fB\-\-config\fR] [\fB\-\-wait\fR[=<n>]]
-+Push the raw xml from <filename> to the CIB (Cluster Information Base).  You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push.  Specify scope to push a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to push the whole CIB or be warned in the case of outdated CIB.  If --wait is specified wait up to 'n' seconds for changes to be applied.  WARNING: the selected scope of the CIB will be overwritten by the current content of the specified file.
- .TP
- cib\-upgrade
- Upgrade the CIB to conform to the latest version of the document schema.
-diff --git a/pcs/resource.py b/pcs/resource.py
-index 284bdb2..9384a21 100644
---- a/pcs/resource.py
-+++ b/pcs/resource.py
-@@ -21,6 +21,8 @@ from pcs import (
-     constraint,
-     settings,
- )
-+from pcs.settings import pacemaker_wait_timeout_status as \
-+    PACEMAKER_WAIT_TIMEOUT_STATUS
- import pcs.lib.cib.acl as lib_acl
- import pcs.lib.pacemaker as lib_pacemaker
- from pcs.lib.external import get_systemd_services
-@@ -31,7 +33,6 @@ from pcs.lib.pacemaker_values import timeout_to_seconds
- import pcs.lib.resource_agent as lib_ra
- 
- 
--PACEMAKER_WAIT_TIMEOUT_STATUS = 62
- RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
- 
- def resource_cmd(argv):
-diff --git a/pcs/settings_default.py b/pcs/settings_default.py
-index 9d44918..15421fd 100644
---- a/pcs/settings_default.py
-+++ b/pcs/settings_default.py
-@@ -40,3 +40,4 @@ ocf_resources = os.path.join(ocf_root, "resource.d/")
- nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/"
- sbd_watchdog_default = "/dev/watchdog"
- sbd_config = "/etc/sysconfig/sbd"
-+pacemaker_wait_timeout_status = 62
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 42e03e6..542f806 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -653,7 +653,7 @@ Commands:
-         scope=configuration.  Do not specify a scope if you want to edit
-         the saved CIB using pcs (pcs -f <command>).
- 
--    cib-push <filename> [scope=<scope> | --config]
-+    cib-push <filename> [scope=<scope> | --config] [--wait[=<n>]]
-         Push the raw xml from <filename> to the CIB (Cluster Information Base).
-         You can obtain the CIB by running the 'pcs cluster cib' command, which
-         is recommended first step when you want to perform desired
-@@ -663,7 +663,8 @@ Commands:
-         crm_config, rsc_defaults, op_defaults.  --config is the same as
-         scope=configuration.  Use of --config is recommended.  Do not specify
-         a scope if you need to push the whole CIB or be warned in the case
--        of outdated CIB.
-+        of outdated CIB. If --wait is specified wait up to 'n' seconds for
-+        changes to be applied.
-         WARNING: the selected scope of the CIB will be overwritten by the
-         current content of the specified file.
- 
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 0b2dc61..b1e00fa 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -5,6 +5,7 @@ require 'set'
- require 'timeout'
- require 'rexml/document'
- require 'base64'
-+require 'tempfile'
- 
- require 'pcs.rb'
- require 'resource.rb'
-@@ -1523,23 +1524,73 @@ def remove_resource(params, request, auth_user)
-     return 403, 'Permission denied'
-   end
-   force = params['force']
-+  user = PCSAuth.getSuperuserAuth()
-   no_error_if_not_exists = params.include?('no_error_if_not_exists')
--  errors = ""
--  params.each { |k,v|
--    if k.index("resid-") == 0
--      resid = k.gsub('resid-', '')
--      command = [PCS, 'resource', 'delete', resid]
--      command << '--force' if force
--      out, errout, retval = run_cmd(auth_user, *command)
-+  resource_list = []
-+  errors = ''
-+  resource_to_remove = []
-+  params.each { |param,_|
-+    if param.start_with?('resid-')
-+      resource_list << param.split('resid-', 2)[1]
-+    end
-+  }
-+  tmp_file = nil
-+  if force
-+    resource_to_remove = resource_list
-+  else
-+    begin
-+      tmp_file = Tempfile.new('temp_cib')
-+      _, err, retval = run_cmd(user, PCS, 'cluster', 'cib', tmp_file.path)
-       if retval != 0
--        unless out.index(" does not exist.") != -1 and no_error_if_not_exists
--          errors += errout.join(' ').strip + "\n"
-+        return [400, 'Unable to stop resource(s).']
-+      end
-+      cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable']
-+      resource_list.each { |resource|
-+        _, err, retval = run_cmd(user, *cmd, resource)
-+        if retval != 0
-+          unless (
-+            err.join('').index('unable to find a resource') != -1 and
-+            no_error_if_not_exists
-+          )
-+            errors += "Unable to stop resource '#{resource}': #{err.join('')}"
-+          end
-+        else
-+          resource_to_remove << resource
-         end
-+      }
-+      _, _, retval = run_cmd(
-+        user, PCS, 'cluster', 'cib-push', tmp_file.path, '--config', '--wait'
-+      )
-+      if retval != 0
-+        return [400, 'Unable to stop resource(s).']
-+      end
-+      errors.strip!
-+      unless errors.empty?
-+        $logger.info("Stopping resource(s) errors:\n#{errors}")
-+        return [400, errors]
-+      end
-+    rescue IOError
-+      return [400, 'Unable to stop resource(s).']
-+    ensure
-+      if tmp_file
-+        tmp_file.close!
-+      end
-+    end
-+  end
-+  resource_to_remove.each { |resource|
-+    cmd = [PCS, 'resource', 'delete', resource]
-+    if force
-+      cmd << '--force'
-+    end
-+    out, err, retval = run_cmd(auth_user, *cmd)
-+    if retval != 0
-+      unless out.index(' does not exist.') != -1 and no_error_if_not_exists
-+        errors += err.join(' ').strip + "\n"
-       end
-     end
-   }
-   errors.strip!
--  if errors == ""
-+  if errors.empty?
-     return 200
-   else
-     $logger.info("Remove resource errors:\n"+errors)
-diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
-index b14c327..5461515 100644
---- a/pcsd/views/main.erb
-+++ b/pcsd/views/main.erb
-@@ -298,14 +298,19 @@
-         {{meta_attributes-table resource=resource}}
-         {{#if utilization_support}}
-           {{#if resource.is_primitive}}
--            {{utilization-table entity=resource utilization=resource.utilization type="resource"}}
-+            {{utilization-table
-+                entity=resource
-+                utilization=resource.utilization
-+                type="resource"
-+                table_id="resource_utilization_attributes"
-+            }}
-           {{/if}}
-         {{/if}}
-         <br style="clear:left;">
-       {{/unless}}
-     </div>
-     {{#if stonith}}
--      <div style="clear:left; margin-top: 2em;" id="stonith_info_div">
-+      <div style="clear:left; margin-top: 2em;" id="stonith_agent_form">
-         {{fence-form
-             resource=resource
-             agent=resource.resource_agent
-@@ -314,7 +319,7 @@
-       </div>
-     {{else}}
-     {{#if resource.is_primitive}}
--      <div style="clear:left; margin-top: 2em;" id="resource_info_div">
-+      <div style="clear:left; margin-top: 2em;" id="resource_agent_form">
-         {{resource-form
-             resource=resource
-             agent=resource.resource_agent
-@@ -725,7 +730,7 @@ Use the 'Add' button to submit the form.">
-       <tr>
-         <td
-             {{action toggleBody}}
--            id="utilization_attributes"
-+            {{bind-attr id=table_id}}
-             class="datatable_header hover-pointer"
-         >
-           {{#if show_content}}
-diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb
-index 478e0f6..8fccd25 100644
---- a/pcsd/views/nodes.erb
-+++ b/pcsd/views/nodes.erb
-@@ -247,9 +247,8 @@
-               </tr>
-             </table>
-             <table style="clear:left;float:left;margin-top:25px;">
--              <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
-+              <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites" id="node_attributes"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
-               <tr><td>
--                <div id="node_attributes">
-                   <table class="datatable">
-                     <tr><th>Attribute</th><th>Value</th><th>Remove</th></tr>
-                     {{#each attr in Pcs.nodesController.cur_node_attr}}
-@@ -268,14 +267,12 @@
-                       <td><button type="button" onclick="add_node_attr('#new_node_attr_col');" name="add">Add</button></td>
-                     </tr>
-                   </table>
--                </div>
-               </td>
-               </tr>
-             </table>
-             <table style="clear:left;float:left;margin-top:25px;">
--              <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
-+              <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites" id="fence_levels"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
-               <tr><td>
--                <div id="fencelevels">
-                   <table class="datatable">
-                     <tr><th>Level</th><th>Fence Devices</th><th>Remove</th></tr>
-                     {{#each Pcs.nodesController.cur_node_fence_levels}}
-@@ -301,13 +298,16 @@
-                       <td><button type="button" onclick="add_remove_fence_level($(this).parent());" name="add">Add</button></td>
-                     </tr>
-                   </table>
--                </div>
-               </td>
-               </tr>
-             </table>
-             {{#if Pcs.nodesController.utilization_support}}
-             <table style="clear:left; float:left; margin-top: 25px;"><tr><td>
--            {{utilization-table entity=Pcs.nodesController.cur_node utilization=Pcs.nodesController.cur_node.utilization}}
-+            {{utilization-table
-+                entity=Pcs.nodesController.cur_node
-+                utilization=Pcs.nodesController.cur_node.utilization
-+                table_id="node_utilization_attributes"
-+            }}
-             </td></tr></table>
-             {{/if}}
-     </div>
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch b/SOURCES/bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch
deleted file mode 100644
index 4fbbc44..0000000
--- a/SOURCES/bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From 06cef95211b84150fece67970426267849e74a36 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Fri, 22 Jul 2016 08:34:21 +0200
-Subject: [PATCH] web UI: fix error when removing resources takes long
-
----
- pcsd/public/js/pcsd.js | 24 +++++++++++++++++-------
- 1 file changed, 17 insertions(+), 7 deletions(-)
-
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index 6c88888..e763482 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -1262,7 +1262,9 @@ function remove_nodes(ids, force) {
- }
- 
- function remove_resource(ids, force) {
--  var data = {};
-+  var data = {
-+    no_error_if_not_exists: true
-+  };
-   if (force) {
-     data["force"] = force;
-   }
-@@ -1287,12 +1289,20 @@ function remove_resource(ids, force) {
-       Pcs.update();
-     },
-     error: function (xhr, status, error) {
--      error = $.trim(error)
--      var message = "Unable to remove resources (" + error + ")";
--      if (
--        (xhr.responseText.substring(0,6) == "Error:") || ("Forbidden" == error)
--      ) {
--        message += "\n\n" + xhr.responseText.replace("--force", "'Enforce removal'");
-+      error = $.trim(error);
-+      var message = "";
-+      if (status == "timeout" || error == "timeout") {
-+        message = "Operation takes longer to complete than expected.";
-+      } else {
-+        message = "Unable to remove resources (" + error + ")";
-+        if (
-+          (xhr.responseText.substring(0, 6) == "Error:") ||
-+          ("Forbidden" == error)
-+        ) {
-+          message += "\n\n" + xhr.responseText.replace(
-+            "--force", "'Enforce removal'"
-+          );
-+        }
-       }
-       alert(message);
-       $("#dialog_verify_remove_resources.ui-dialog-content").each(
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch b/SOURCES/bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch
deleted file mode 100644
index 2067df2..0000000
--- a/SOURCES/bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 900da783cefaa9f8d81ac72bf90b532638ac297b Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Mon, 25 Jul 2016 15:30:49 +0200
-Subject: [PATCH] web UI: correct handling of timeout when removing multiple
- resources
-
-If there are only 2 nodes in cluster (timeout for request is 30 seconds per node)
-and removing resources takes longer then 1 minute, we have no other nodes to try
-remove resources so it will be returned to javascript as there was no response
-from cluster. Now we handle this stituation properly.
----
- pcsd/public/js/pcsd.js | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index e763482..45da010 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -1291,7 +1291,11 @@ function remove_resource(ids, force) {
-     error: function (xhr, status, error) {
-       error = $.trim(error);
-       var message = "";
--      if (status == "timeout" || error == "timeout") {
-+      if (
-+        status == "timeout" ||
-+        error == "timeout" ||
-+        xhr.responseText == '{"noresponse":true}'
-+      ) {
-         message = "Operation takes longer to complete than expected.";
-       } else {
-         message = "Unable to remove resources (" + error + ")";
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch b/SOURCES/bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch
deleted file mode 100644
index 41eb6fc..0000000
--- a/SOURCES/bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 2b843e7582ef3160a16094526727101180649448 Mon Sep 17 00:00:00 2001
-From: Radek Steiger <rsteiger@redhat.com>
-Date: Wed, 10 Aug 2016 09:56:28 +0200
-Subject: [PATCH] fix detecting nonexisting resources in pcsd
-
----
- pcsd/remote.rb | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 134ac5d..e467d0a 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -1584,7 +1584,7 @@ def remove_resource(params, request, auth_user)
-         out, err, retval = run_cmd(user, *(cmd + [resource]))
-         if retval != 0
-           unless (
--            (out + err).join('').include?(' does not exist.') and
-+            (out + err).join('').include?('unable to find a resource') and
-             no_error_if_not_exists
-           )
-             errors += "Unable to stop resource '#{resource}': #{err.join('')}"
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch b/SOURCES/bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch
deleted file mode 100644
index 52d34b0..0000000
--- a/SOURCES/bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-From 0f305d7d54b40fe13b1ef2134701b5169fe79d65 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Thu, 30 Jun 2016 17:23:19 +0200
-Subject: [PATCH] add a wrapper for holding SELinux context when pcsd is
- started by systemd
-
----
- Makefile                 |  3 +++
- pcsd/pcsd.service        |  2 +-
- pcsd/pcsd.service-runner | 13 +++++++++++++
- 3 files changed, 17 insertions(+), 1 deletion(-)
- create mode 100644 pcsd/pcsd.service-runner
-
-diff --git a/Makefile b/Makefile
-index de216ce..f0a5d03 100644
---- a/Makefile
-+++ b/Makefile
-@@ -126,6 +126,9 @@ else
-   ifeq ($(IS_SYSTEMCTL),true)
- 	install -d ${DESTDIR}/${systemddir}/system/
- 	install -m 644 pcsd/pcsd.service ${DESTDIR}/${systemddir}/system/
-+# ${DESTDIR}${PREFIX}/lib/pcsd/pcsd holds the selinux context
-+	install -m 755 pcsd/pcsd.service-runner ${DESTDIR}${PREFIX}/lib/pcsd/pcsd
-+	rm ${DESTDIR}${PREFIX}/lib/pcsd/pcsd.service-runner
-   else
- 	install -m 755 -D pcsd/pcsd ${DESTDIR}/${initdir}/pcsd
-   endif
-diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
-index 075a3a6..e506f1b 100644
---- a/pcsd/pcsd.service
-+++ b/pcsd/pcsd.service
-@@ -4,7 +4,7 @@ Description=PCS GUI and remote configuration interface
- [Service]
- EnvironmentFile=/etc/sysconfig/pcsd
- Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
--ExecStart=/usr/bin/ruby -C/var/lib/pcsd -I/usr/lib/pcsd -- /usr/lib/pcsd/ssl.rb > /dev/null &
-+ExecStart=/usr/lib/pcsd/pcsd > /dev/null &
- 
- [Install]
- WantedBy=multi-user.target
-diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner
-new file mode 100644
-index 0000000..1949a68
---- /dev/null
-+++ b/pcsd/pcsd.service-runner
-@@ -0,0 +1,13 @@
-+#!/usr/bin/ruby
-+# this file is a pcsd runner callable from a systemd unit
-+# it also serves as a holder of a selinux context
-+
-+# add pcsd to the load path (ruby -I)
-+libdir = File.dirname(__FILE__)
-+$LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
-+
-+# change current directory (ruby -C)
-+Dir.chdir('/var/lib/pcsd')
-+
-+# import and run pcsd
-+require 'ssl'
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch b/SOURCES/bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch
deleted file mode 100644
index 3700d4d..0000000
--- a/SOURCES/bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 760028cca19c07dd56162453a4eb3d3b0de7f3af Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 19 Jul 2016 13:11:04 +0200
-Subject: [PATCH] fix traceback when stopping pcsd shortly after start
-
-- properly notify systemd that pcsd finished starting up
-- gracefully exit on SIGINT and SIGTERM
----
- pcsd/pcsd.service        |  1 +
- pcsd/pcsd.service-runner | 25 ++++++++++++++++++-------
- pcsd/ssl.rb              | 18 ++++++++++++++++++
- 3 files changed, 37 insertions(+), 7 deletions(-)
-
-diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
-index e506f1b..20bc9ab 100644
---- a/pcsd/pcsd.service
-+++ b/pcsd/pcsd.service
-@@ -5,6 +5,7 @@ Description=PCS GUI and remote configuration interface
- EnvironmentFile=/etc/sysconfig/pcsd
- Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
- ExecStart=/usr/lib/pcsd/pcsd > /dev/null &
-+Type=notify
- 
- [Install]
- WantedBy=multi-user.target
-diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner
-index 1949a68..883d290 100644
---- a/pcsd/pcsd.service-runner
-+++ b/pcsd/pcsd.service-runner
-@@ -2,12 +2,23 @@
- # this file is a pcsd runner callable from a systemd unit
- # it also serves as a holder of a selinux context
- 
--# add pcsd to the load path (ruby -I)
--libdir = File.dirname(__FILE__)
--$LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
-+begin
-+  # add pcsd to the load path (ruby -I)
-+  libdir = File.dirname(__FILE__)
-+  $LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
- 
--# change current directory (ruby -C)
--Dir.chdir('/var/lib/pcsd')
-+  # change current directory (ruby -C)
-+  Dir.chdir('/var/lib/pcsd')
- 
--# import and run pcsd
--require 'ssl'
-+  # import and run pcsd
-+  require 'ssl'
-+rescue SignalException => e
-+  if [Signal.list['INT'], Signal.list['TERM']].include?(e.signo)
-+    # gracefully exit on SIGINT and SIGTERM
-+    # pcsd sets up signal handlers later, this catches exceptions which occur
-+    # by recieving signals before the handlers have been set up.
-+    exit
-+  else
-+    raise
-+  end
-+end
-diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
-index f56c947..c00d8b3 100644
---- a/pcsd/ssl.rb
-+++ b/pcsd/ssl.rb
-@@ -3,6 +3,7 @@ require 'webrick'
- require 'webrick/https'
- require 'openssl'
- require 'rack'
-+require 'socket'
- 
- require 'bootstrap.rb'
- require 'pcs.rb'
-@@ -66,11 +67,28 @@ def run_server(server, webrick_options, secondary_addrs)
- 
-   $logger.info("Listening on #{primary_addr} port #{port}")
-   server.run(Sinatra::Application, webrick_options) { |server_instance|
-+    # configure ssl options
-     server_instance.ssl_context.ciphers = ciphers
-+    # set listening addresses
-     secondary_addrs.each { |addr|
-       $logger.info("Adding listener on #{addr} port #{port}")
-       server_instance.listen(addr, port)
-     }
-+    # notify systemd we are running
-+    if ISSYSTEMCTL
-+      socket_name = ENV['NOTIFY_SOCKET']
-+      if socket_name
-+        if socket_name.start_with?('@')
-+          # abstract namespace socket
-+          socket_name[0] = "\0"
-+        end
-+        $logger.info("Notifying systemd we are running (socket #{socket_name})")
-+        sd_socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM)
-+        sd_socket.connect(Socket.pack_sockaddr_un(socket_name))
-+        sd_socket.send('READY=1', 0)
-+        sd_socket.close()
-+      end
-+    end
-   }
- end
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1349465-01-allow-to-specify-bash-completion-install-dir.patch b/SOURCES/bz1349465-01-allow-to-specify-bash-completion-install-dir.patch
deleted file mode 100644
index acc3601..0000000
--- a/SOURCES/bz1349465-01-allow-to-specify-bash-completion-install-dir.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From 160b8c4657356725befb02212c148c58da3ce7aa Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Fri, 1 Jul 2016 07:23:37 +0200
-Subject: [PATCH] allow to specify bash completion install dir
-
----
- Makefile | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-diff --git a/Makefile b/Makefile
-index 5c3e9d6..8e845d6 100644
---- a/Makefile
-+++ b/Makefile
-@@ -76,11 +76,16 @@ ifndef install_settings
-   endif
- endif
- 
-+
-+ifndef BASH_COMPLETION_DIR
-+	BASH_COMPLETION_DIR=${DESTDIR}/etc/bash_completion.d
-+endif
-+
- install:
- 	$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
- 	mkdir -p ${DESTDIR}${PREFIX}/sbin/
- 	mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs
--	install -D pcs/bash_completion.sh ${DESTDIR}/etc/bash_completion.d/pcs
-+	install -D pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs
- 	install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
- ifeq ($(IS_DEBIAN),true)
-   ifeq ($(install_settings),true)
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1349465-02-install-bash-completion-with-standard-permissions.patch b/SOURCES/bz1349465-02-install-bash-completion-with-standard-permissions.patch
deleted file mode 100644
index ebf7c05..0000000
--- a/SOURCES/bz1349465-02-install-bash-completion-with-standard-permissions.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 01e0ceba838c558c08b11c51646af6e8d26a699b Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 27 Jul 2016 16:09:01 +0200
-Subject: [PATCH] install bash completion with standard permissions
-
----
- Makefile | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/Makefile b/Makefile
-index cbbeb85..25fb87d 100644
---- a/Makefile
-+++ b/Makefile
-@@ -85,7 +85,7 @@ install:
- 	$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
- 	mkdir -p ${DESTDIR}${PREFIX}/sbin/
- 	mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs
--	install -D pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs
-+	install -D -m644 pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs
- 	install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
- ifeq ($(IS_DEBIAN),true)
-   ifeq ($(install_settings),true)
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1353607-01-tests-use-safe-node-names.patch b/SOURCES/bz1353607-01-tests-use-safe-node-names.patch
deleted file mode 100644
index 0b0238f..0000000
--- a/SOURCES/bz1353607-01-tests-use-safe-node-names.patch
+++ /dev/null
@@ -1,1623 +0,0 @@
-From 850473a59993e1a75c248a9b3a83284f568a4bf2 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 8 Jul 2016 15:14:05 +0200
-Subject: [PATCH] tests: use safe node names
-
----
- pcs/test/test_cluster.py | 466 +++++++++++++++++++++++------------------------
- 1 file changed, 233 insertions(+), 233 deletions(-)
-
-diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
-index 2c3e71b..8a245a2 100644
---- a/pcs/test/test_cluster.py
-+++ b/pcs/test/test_cluster.py
-@@ -106,7 +106,7 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
-         self.assertTrue(output.startswith("\nUsage: pcs cluster setup..."))
-         self.assertEqual(1, returnVal)
- 
--        output, returnVal = pcs(temp_cib, "cluster setup cname rh7-1 rh7-2")
-+        output, returnVal = pcs(temp_cib, "cluster setup cname rh7-1.localhost rh7-2.localhost")
-         self.assertEqual(
-             "Error: A cluster name (--name <name>) is required to setup a cluster\n",
-             output
-@@ -116,22 +116,22 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
-     def test_cluster_setup_hostnames_resolving(self):
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address"
-+            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address.invalid"
-             .format(corosync_conf_tmp, cluster_conf_tmp)
-         )
-         ac(output, """\
- Error: Unable to resolve all hostnames, use --force to override
--Warning: Unable to resolve hostname: nonexistant-address
-+Warning: Unable to resolve hostname: nonexistant-address.invalid
- """)
-         self.assertEqual(1, returnVal)
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address --force"
-+            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address.invalid --force"
-             .format(corosync_conf_tmp, cluster_conf_tmp)
-         )
-         ac(output, """\
--Warning: Unable to resolve hostname: nonexistant-address
-+Warning: Unable to resolve hostname: nonexistant-address.invalid
- """)
-         self.assertEqual(0, returnVal)
- 
-@@ -141,7 +141,7 @@ Warning: Unable to resolve hostname: nonexistant-address
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -156,12 +156,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -183,7 +183,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --name cname rh7-2 rh7-3"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("""\
-@@ -198,7 +198,7 @@ Error: {0} already exists, use --force to overwrite
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --force --local --corosync_conf={0} --name cname rh7-2 rh7-3"
-+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -215,12 +215,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-3
-+        ring0_addr: rh7-3.localhost
-         nodeid: 2
-     }
- }
-@@ -243,7 +243,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
-             .format(cluster_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -252,17 +252,17 @@ logging {
- <cluster config_version="9" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -283,7 +283,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-2 rh7-3"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
-             .format(cluster_conf_tmp)
-         )
-         self.assertEqual("""\
-@@ -298,7 +298,7 @@ Error: {0} already exists, use --force to overwrite
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --force --local --cluster_conf={0} --name cname rh7-2 rh7-3"
-+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
-             .format(cluster_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -309,17 +309,17 @@ Error: {0} already exists, use --force to overwrite
- <cluster config_version="9" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-2" nodeid="1">
-+    <clusternode name="rh7-2.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-3" nodeid="2">
-+    <clusternode name="rh7-3.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-3"/>
-+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -344,7 +344,7 @@ Error: {0} already exists, use --force to overwrite
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -361,12 +361,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -385,10 +385,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode add --corosync_conf={0} rh7-3"
-+            "cluster localnode add --corosync_conf={0} rh7-3.localhost"
-             .format(corosync_conf_tmp)
-         )
--        self.assertEqual("rh7-3: successfully added!\n", output)
-+        self.assertEqual("rh7-3.localhost: successfully added!\n", output)
-         self.assertEqual(0, returnVal)
-         with open(corosync_conf_tmp) as f:
-             data = f.read()
-@@ -402,17 +402,17 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- 
-     node {
--        ring0_addr: rh7-3
-+        ring0_addr: rh7-3.localhost
-         nodeid: 3
-     }
- }
-@@ -430,11 +430,11 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode remove --corosync_conf={0} rh7-3"
-+            "cluster localnode remove --corosync_conf={0} rh7-3.localhost"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual(0, returnVal)
--        self.assertEqual("rh7-3: successfully removed!\n", output)
-+        self.assertEqual("rh7-3.localhost: successfully removed!\n", output)
-         with open(corosync_conf_tmp) as f:
-             data = f.read()
-             ac(data, """\
-@@ -447,12 +447,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -471,10 +471,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode add --corosync_conf={0} rh7-3,192.168.1.3"
-+            "cluster localnode add --corosync_conf={0} rh7-3.localhost,192.168.1.3"
-             .format(corosync_conf_tmp)
-         )
--        self.assertEqual("rh7-3,192.168.1.3: successfully added!\n", output)
-+        self.assertEqual("rh7-3.localhost,192.168.1.3: successfully added!\n", output)
-         self.assertEqual(0, returnVal)
-         with open(corosync_conf_tmp) as f:
-             data = f.read()
-@@ -488,17 +488,17 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- 
-     node {
--        ring0_addr: rh7-3
-+        ring0_addr: rh7-3.localhost
-         ring1_addr: 192.168.1.3
-         nodeid: 3
-     }
-@@ -517,11 +517,11 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode remove --corosync_conf={0} rh7-2"
-+            "cluster localnode remove --corosync_conf={0} rh7-2.localhost"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual(0, returnVal)
--        self.assertEqual("rh7-2: successfully removed!\n", output)
-+        self.assertEqual("rh7-2.localhost: successfully removed!\n", output)
-         with open(corosync_conf_tmp) as f:
-             data = f.read()
-             ac(data, """\
-@@ -534,12 +534,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-3
-+        ring0_addr: rh7-3.localhost
-         ring1_addr: 192.168.1.3
-         nodeid: 3
-     }
-@@ -559,11 +559,11 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode remove --corosync_conf={0} rh7-3,192.168.1.3"
-+            "cluster localnode remove --corosync_conf={0} rh7-3.localhost,192.168.1.3"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual(0, returnVal)
--        self.assertEqual("rh7-3,192.168.1.3: successfully removed!\n", output)
-+        self.assertEqual("rh7-3.localhost,192.168.1.3: successfully removed!\n", output)
-         with open(corosync_conf_tmp) as f:
-             data = f.read()
-             ac(data, """\
-@@ -576,7 +576,7 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- }
-@@ -601,7 +601,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --auto_tie_breaker=1"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --auto_tie_breaker=1"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -618,12 +618,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -642,10 +642,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode add --corosync_conf={0} rh7-3"
-+            "cluster localnode add --corosync_conf={0} rh7-3.localhost"
-             .format(corosync_conf_tmp)
-         )
--        self.assertEqual(output, "rh7-3: successfully added!\n")
-+        self.assertEqual(output, "rh7-3.localhost: successfully added!\n")
-         self.assertEqual(0, returnVal)
-         with open(corosync_conf_tmp) as f:
-             data = f.read()
-@@ -659,17 +659,17 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- 
-     node {
--        ring0_addr: rh7-3
-+        ring0_addr: rh7-3.localhost
-         nodeid: 3
-     }
- }
-@@ -688,10 +688,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode remove --corosync_conf={0} rh7-3"
-+            "cluster localnode remove --corosync_conf={0} rh7-3.localhost"
-             .format(corosync_conf_tmp)
-         )
--        self.assertEqual("rh7-3: successfully removed!\n", output)
-+        self.assertEqual("rh7-3.localhost: successfully removed!\n", output)
-         self.assertEqual(0, returnVal)
-         with open(corosync_conf_tmp) as f:
-             data = f.read()
-@@ -705,12 +705,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -734,7 +734,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 rh7-3"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost rh7-3.localhost"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -751,17 +751,17 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- 
-     node {
--        ring0_addr: rh7-3
-+        ring0_addr: rh7-3.localhost
-         nodeid: 3
-     }
- }
-@@ -784,7 +784,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --transport udp"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --transport udp"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -801,12 +801,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -834,7 +834,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, "")
-@@ -845,17 +845,17 @@ logging {
- <cluster config_version="9" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -873,10 +873,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode add --cluster_conf={0} rh7-3"
-+            "cluster localnode add --cluster_conf={0} rh7-3.localhost"
-             .format(cluster_conf_tmp)
-         )
--        ac(output, "rh7-3: successfully added!\n")
-+        ac(output, "rh7-3.localhost: successfully added!\n")
-         self.assertEqual(returnVal, 0)
-         with open(cluster_conf_tmp) as f:
-             data = f.read()
-@@ -884,24 +884,24 @@ logging {
- <cluster config_version="13" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-3" nodeid="3">
-+    <clusternode name="rh7-3.localhost" nodeid="3">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-3"/>
-+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -919,10 +919,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode remove --cluster_conf={0} rh7-3"
-+            "cluster localnode remove --cluster_conf={0} rh7-3.localhost"
-             .format(cluster_conf_tmp)
-         )
--        ac(output, "rh7-3: successfully removed!\n")
-+        ac(output, "rh7-3.localhost: successfully removed!\n")
-         self.assertEqual(returnVal, 0)
- 
-         with open(cluster_conf_tmp) as f:
-@@ -931,17 +931,17 @@ logging {
- <cluster config_version="15" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -959,10 +959,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode add --cluster_conf={0} rh7-3,192.168.1.3"
-+            "cluster localnode add --cluster_conf={0} rh7-3.localhost,192.168.1.3"
-             .format(cluster_conf_tmp)
-         )
--        ac(output, "rh7-3,192.168.1.3: successfully added!\n")
-+        ac(output, "rh7-3.localhost,192.168.1.3: successfully added!\n")
-         self.assertEqual(returnVal, 0)
- 
-         with open(cluster_conf_tmp) as f:
-@@ -971,25 +971,25 @@ logging {
- <cluster config_version="20" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-3" nodeid="3">
-+    <clusternode name="rh7-3.localhost" nodeid="3">
-       <altname name="192.168.1.3"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-3"/>
-+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1007,10 +1007,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode remove --cluster_conf={0} rh7-2"
-+            "cluster localnode remove --cluster_conf={0} rh7-2.localhost"
-             .format(cluster_conf_tmp)
-         )
--        ac(output, "rh7-2: successfully removed!\n")
-+        ac(output, "rh7-2.localhost: successfully removed!\n")
-         self.assertEqual(returnVal, 0)
- 
-         with open(cluster_conf_tmp) as f:
-@@ -1019,18 +1019,18 @@ logging {
- <cluster config_version="22" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-3" nodeid="3">
-+    <clusternode name="rh7-3.localhost" nodeid="3">
-       <altname name="192.168.1.3"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-3"/>
-+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1048,10 +1048,10 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster localnode remove --cluster_conf={0} rh7-3,192.168.1.3"
-+            "cluster localnode remove --cluster_conf={0} rh7-3.localhost,192.168.1.3"
-             .format(cluster_conf_tmp)
-         )
--        ac(output, "rh7-3,192.168.1.3: successfully removed!\n")
-+        ac(output, "rh7-3.localhost,192.168.1.3: successfully removed!\n")
-         self.assertEqual(returnVal, 0)
- 
-         with open(cluster_conf_tmp) as f:
-@@ -1060,10 +1060,10 @@ logging {
- <cluster config_version="23" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1086,7 +1086,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 rh7-3"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost rh7-3.localhost"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, "")
-@@ -1097,24 +1097,24 @@ logging {
- <cluster config_version="12" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-3" nodeid="3">
-+    <clusternode name="rh7-3.localhost" nodeid="3">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-3"/>
-+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1137,7 +1137,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --transport udpu"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --transport udpu"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -1150,17 +1150,17 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
- <cluster config_version="9" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1182,7 +1182,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --ipv6"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --ipv6"
-             .format(corosync_conf_tmp)
-         )
-         self.assertEqual("", output)
-@@ -1200,12 +1200,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1228,7 +1228,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --ipv6"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --ipv6"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -1241,17 +1241,17 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
- <cluster config_version="9" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1272,14 +1272,14 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
-             return
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr0 1.1.2.0"
-             .format(corosync_conf_tmp)
-         )
-         assert r == 1
-         ac(o, "Error: --addr0 can only be used once\n")
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
-             .format(corosync_conf_tmp)
-         )
-         assert r == 1
-@@ -1289,7 +1289,7 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
-         )
- 
-         o,r = pcs(
--            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
-+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"")
-@@ -1321,12 +1321,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1348,7 +1348,7 @@ logging {
-             return
- 
-         o,r = pcs(
--            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
-+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"")
-@@ -1380,12 +1380,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1407,7 +1407,7 @@ logging {
-             return
- 
-         o,r = pcs(
--            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
-+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"")
-@@ -1439,12 +1439,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1466,7 +1466,7 @@ logging {
-             return
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"")
-@@ -1500,12 +1500,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1527,14 +1527,14 @@ logging {
-             return
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
-@@ -1566,12 +1566,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1593,14 +1593,14 @@ logging {
-             return
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
-@@ -1631,12 +1631,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1658,25 +1658,25 @@ logging {
-             return
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2,192.168.99.3"
-             .format(corosync_conf_tmp)
-         )
--        ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3\n")
-+        ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2.localhost,192.168.99.2,192.168.99.3\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"Error: if one node is configured for RRP, all nodes must be configured for RRP\n")
-         assert r == 1
- 
--        o,r = pcs("cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1")
-+        o,r = pcs("cluster setup --force --local --name test99 rh7-1.localhost rh7-2.localhost --addr0 1.1.1.1")
-         ac(o,"Error: --addr0 and --addr1 can only be used with --transport=udp\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
-+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"")
-@@ -1694,13 +1694,13 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         ring1_addr: 192.168.99.1
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         ring1_addr: 192.168.99.2
-         nodeid: 2
-     }
-@@ -1723,49 +1723,49 @@ logging {
-             return
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2"
-+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2"
-+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2"
-+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --auto_tie_breaker=2"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2"
-+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --auto_tie_breaker=2"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2"
-+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --last_man_standing=2"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2"
-+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --last_man_standing=2"
-             .format(corosync_conf_tmp)
-         )
-         ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n")
-         assert r == 1
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000"
-+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"")
-@@ -1782,12 +1782,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -1813,14 +1813,14 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0"
-+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr0 1.1.2.0"
-         )
-         ac(output, "Error: --addr0 can only be used once\n")
-         self.assertEqual(returnVal, 1)
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -1831,7 +1831,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
-+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, "")
-@@ -1843,19 +1843,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- <cluster config_version="14" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1881,7 +1881,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
-+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, "")
-@@ -1893,19 +1893,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- <cluster config_version="14" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1931,7 +1931,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
-+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, "")
-@@ -1943,19 +1943,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- <cluster config_version="14" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -1981,7 +1981,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, "")
-@@ -1993,19 +1993,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- <cluster config_version="14" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -2031,7 +2031,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(
-@@ -2042,7 +2042,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(
-@@ -2056,19 +2056,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- <cluster config_version="14" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -2094,7 +2094,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -2105,7 +2105,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -2119,19 +2119,19 @@ Warning: using a RRP mode of 'active' is not supported or tested
- <cluster config_version="12" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -2154,17 +2154,17 @@ Warning: using a RRP mode of 'active' is not supported or tested
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2,192.168.99.3"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
--Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3
-+Error: You cannot specify more than two addresses for a node: rh7-2.localhost,192.168.99.2,192.168.99.3
- """)
-         self.assertEqual(returnVal, 1)
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --name cname rh7-1,192.168.99.1 rh7-2"
-+            "cluster setup --local --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost"
-         )
-         ac(output, """\
- Error: if one node is configured for RRP, all nodes must be configured for RRP
-@@ -2173,7 +2173,7 @@ Error: if one node is configured for RRP, all nodes must be configured for RRP
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1 --transport=udpu"
-+            "cluster setup --local --name test99 rh7-1.localhost rh7-2.localhost --addr0 1.1.1.1 --transport=udpu"
-         )
-         ac(output, """\
- Error: --addr0 and --addr1 can only be used with --transport=udp
-@@ -2183,7 +2183,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, "")
-@@ -2194,19 +2194,19 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
- <cluster config_version="12" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="192.168.99.1"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="192.168.99.2"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -2231,19 +2231,19 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
- <cluster config_version="12" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <altname name="1.1.2.0"/>
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -2262,7 +2262,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -2277,7 +2277,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
-+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -2294,7 +2294,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
-+            "cluster setup --local --cluster_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -2310,17 +2310,17 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
- <cluster config_version="9" name="test99">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -2341,7 +2341,7 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
-             return
- 
-         o,r = pcs(
--            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
-+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
-             .format(corosync_conf_tmp)
-         )
-         ac(o,"")
-@@ -2364,12 +2364,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -2392,7 +2392,7 @@ logging {
- 
-         output, returnVal = pcs(
-             temp_cib,
--            "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
-+            "cluster setup --local --cluster_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
-             .format(cluster_conf_tmp)
-         )
-         ac(output, """\
-@@ -2405,17 +2405,17 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
- <cluster config_version="10" name="test99">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
-@@ -2583,12 +2583,12 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
-             return
- 
-         self.assert_pcs_fail(
--            "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown",
-+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=unknown",
-             "Error: 'unknown' is not a valid transport value, use udp, udpu, use --force to override\n"
-         )
- 
-         self.assert_pcs_success(
--            "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown --force",
-+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=unknown --force",
-             "Warning: 'unknown' is not a valid transport value, use udp, udpu\n"
-         )
-         with open(corosync_conf_tmp) as f:
-@@ -2603,12 +2603,12 @@ totem {
- 
- nodelist {
-     node {
--        ring0_addr: rh7-1
-+        ring0_addr: rh7-1.localhost
-         nodeid: 1
-     }
- 
-     node {
--        ring0_addr: rh7-2
-+        ring0_addr: rh7-2.localhost
-         nodeid: 2
-     }
- }
-@@ -2630,12 +2630,12 @@ logging {
-             return
- 
-         self.assert_pcs_fail(
--            "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma",
-+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=rdma",
-             "Error: 'rdma' is not a valid transport value, use udp, udpu, use --force to override\n"
-         )
- 
-         self.assert_pcs_success(
--            "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma --force",
-+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=rdma --force",
-             "Warning: 'rdma' is not a valid transport value, use udp, udpu\n"
-         )
-         with open(cluster_conf_tmp) as f:
-@@ -2644,17 +2644,17 @@ logging {
- <cluster config_version="9" name="cname">
-   <fence_daemon/>
-   <clusternodes>
--    <clusternode name="rh7-1" nodeid="1">
-+    <clusternode name="rh7-1.localhost" nodeid="1">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-1"/>
-+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--    <clusternode name="rh7-2" nodeid="2">
-+    <clusternode name="rh7-2.localhost" nodeid="2">
-       <fence>
-         <method name="pcmk-method">
--          <device name="pcmk-redirect" port="rh7-2"/>
-+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
-         </method>
-       </fence>
-     </clusternode>
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch b/SOURCES/bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch
deleted file mode 100644
index 73bcba3..0000000
--- a/SOURCES/bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From 35d19fd293a758c0696410a490daa349bfcc9e21 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Mon, 8 Aug 2016 15:05:51 +0200
-Subject: [PATCH] handle exceptions when waiting for response from user
-
----
- pcs/utils.py | 22 ++++++++++++++++------
- 1 file changed, 16 insertions(+), 6 deletions(-)
-
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 8b2cf7c..53cc0b0 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1801,14 +1801,24 @@ def get_terminal_input(message=None):
-     if message:
-         sys.stdout.write(message)
-         sys.stdout.flush()
--    if PYTHON2:
--        return raw_input("")
--    else:
--        return input("")
-+    try:
-+        if PYTHON2:
-+            return raw_input("")
-+        else:
-+            return input("")
-+    except EOFError:
-+        return ""
-+    except KeyboardInterrupt:
-+        print("Interrupted")
-+        sys.exit(1)
- 
- def get_terminal_password(message="Password: "):
--    if sys.stdout.isatty():
--        return getpass.getpass(message)
-+    if sys.stdin.isatty():
-+        try:
-+            return getpass.getpass(message)
-+        except KeyboardInterrupt:
-+            print("Interrupted")
-+            sys.exit(1)
-     else:
-         return get_terminal_input(message)
- 
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1357945-01-add-support-for-clufter-s-dist-parameter.patch b/SOURCES/bz1357945-01-add-support-for-clufter-s-dist-parameter.patch
deleted file mode 100644
index a2beae5..0000000
--- a/SOURCES/bz1357945-01-add-support-for-clufter-s-dist-parameter.patch
+++ /dev/null
@@ -1,253 +0,0 @@
-From 4ffe5f795247be1f1a14100721bf09d54c904a94 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Wed, 20 Jul 2016 16:42:37 +0200
-Subject: [PATCH] add support for clufter's 'dist' parameter
-
----
- pcs/config.py | 69 ++++++++++++++++++++++++++++++++++++++++++++++++-----------
- pcs/pcs.8     | 10 ++++-----
- pcs/usage.py  | 21 ++++++++++++------
- 3 files changed, 76 insertions(+), 24 deletions(-)
-
-diff --git a/pcs/config.py b/pcs/config.py
-index 9119c3c..e410a5a 100644
---- a/pcs/config.py
-+++ b/pcs/config.py
-@@ -18,8 +18,10 @@ import logging
- import pwd
- import grp
- import time
-+import platform
- 
- try:
-+    import clufter.facts
-     import clufter.format_manager
-     import clufter.filter_manager
-     import clufter.command_manager
-@@ -555,6 +557,7 @@ def config_import_cman(argv):
-     cluster_conf = settings.cluster_conf_file
-     dry_run_output = None
-     output_format = "cluster.conf" if utils.is_rhel6() else "corosync.conf"
-+    dist = None
-     invalid_args = False
-     for arg in argv:
-         if "=" in arg:
-@@ -571,6 +574,8 @@ def config_import_cman(argv):
-                     output_format = value
-                 else:
-                     invalid_args = True
-+            elif name == "dist":
-+                dist = value
-             else:
-                 invalid_args = True
-         else:
-@@ -588,12 +593,34 @@ def config_import_cman(argv):
-     force = "--force" in utils.pcs_options
-     interactive = "--interactive" in utils.pcs_options
- 
-+    if dist is not None:
-+        if output_format == "cluster.conf":
-+            if not clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")):
-+                utils.err("dist does not match output-format")
-+        elif output_format == "corosync.conf":
-+            if not clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
-+                utils.err("dist does not match output-format")
-+    elif (
-+        (output_format == "cluster.conf" and utils.is_rhel6())
-+        or
-+        (output_format == "corosync.conf" and not utils.is_rhel6())
-+    ):
-+        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
-+    elif output_format == "cluster.conf":
-+        dist = "redhat,6.7,Santiago"
-+    elif output_format == "corosync.conf":
-+        dist = "redhat,7.1,Maipo"
-+    else:
-+        # for output-format=pcs-command[-verbose]
-+        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
-+
-     clufter_args = {
-         "input": str(cluster_conf),
-         "cib": {"passin": "bytestring"},
-         "nocheck": force,
-         "batch": True,
-         "sys": "linux",
-+        "dist": dist,
-         # Make it work on RHEL6 as well for sure
-         "color": "always" if sys.stdout.isatty() else "never"
-     }
-@@ -606,11 +633,9 @@ def config_import_cman(argv):
-         logging.getLogger("clufter").setLevel(logging.DEBUG)
-     if output_format == "cluster.conf":
-         clufter_args["ccs_pcmk"] = {"passin": "bytestring"}
--        clufter_args["dist"] = "redhat,6.7,Santiago"
-         cmd_name = "ccs2pcs-flatiron"
-     elif output_format == "corosync.conf":
-         clufter_args["coro"] = {"passin": "struct"}
--        clufter_args["dist"] = "redhat,7.1,Maipo"
-         cmd_name = "ccs2pcs-needle"
-     elif output_format in ("pcs-commands", "pcs-commands-verbose"):
-         clufter_args["output"] = {"passin": "bytestring"}
-@@ -624,7 +649,15 @@ def config_import_cman(argv):
-             clufter_args["text_width"] = "-1"
-             clufter_args["silent"] = False
-             clufter_args["noguidance"] = False
--        cmd_name = "ccs2pcscmd-flatiron"
-+        if clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")):
-+            cmd_name = "ccs2pcscmd-flatiron"
-+        elif clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
-+            cmd_name = "ccs2pcscmd-needle"
-+        else:
-+            utils.err(
-+                "unrecognized dist, try something recognized"
-+                + " (e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty)"
-+            )
-     clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)
- 
-     # run convertor
-@@ -737,29 +770,36 @@ def config_export_pcs_commands(argv, verbose=False):
-     interactive = "--interactive" in utils.pcs_options
-     invalid_args = False
-     output_file = None
-+    dist = None
-     for arg in argv:
-         if "=" in arg:
-             name, value = arg.split("=", 1)
-             if name == "output":
-                 output_file = value
-+            elif name == "dist":
-+                dist = value
-             else:
-                 invalid_args = True
-         else:
-             invalid_args = True
--    if invalid_args or not output_file:
-+    # check options
-+    if invalid_args:
-         usage.config(["export", "pcs-commands"])
-         sys.exit(1)
-+    # complete optional options
-+    if dist is None:
-+        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
- 
-     # prepare convertor options
-     clufter_args = {
-         "nocheck": force,
-         "batch": True,
-         "sys": "linux",
-+        "dist": dist,
-         # Make it work on RHEL6 as well for sure
-         "color": "always" if sys.stdout.isatty() else "never",
-         "coro": settings.corosync_conf_file,
-         "ccs": settings.cluster_conf_file,
--        "output": {"passin": "bytestring"},
-         "start_wait": "60",
-         "tmp_cib": "tmp-cib.xml",
-         "force": force,
-@@ -767,6 +807,10 @@ def config_export_pcs_commands(argv, verbose=False):
-         "silent": True,
-         "noguidance": True,
-     }
-+    if output_file:
-+        clufter_args["output"] = {"passin": "bytestring"}
-+    else:
-+        clufter_args["output"] = "-"
-     if interactive:
-         if "EDITOR" not in os.environ:
-             utils.err("$EDITOR environment variable is not set")
-@@ -791,13 +835,14 @@ def config_export_pcs_commands(argv, verbose=False):
-         "Error: unable to export cluster configuration"
-     )
- 
--    # save commands
--    ok, message = utils.write_file(
--        output_file,
--        clufter_args_obj.output["passout"]
--    )
--    if not ok:
--        utils.err(message)
-+    # save commands if not printed to stdout by clufter
-+    if output_file:
-+        ok, message = utils.write_file(
-+            output_file,
-+            clufter_args_obj.output["passout"]
-+        )
-+        if not ok:
-+            utils.err(message)
- 
- def run_clufter(cmd_name, cmd_args, debug, force, err_prefix):
-     try:
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index a26c94b..66ffb8c 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -624,14 +624,14 @@ Show specified configuration checkpoint.
- checkpoint restore <checkpoint_number>
- Restore cluster configuration to specified checkpoint.
- .TP
--import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf]
--Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format option.
-+import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>]
-+Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format option.  Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf.
- .TP
--import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose
-+import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
- Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed.  Commands will be saved to 'output' file.  For other options see above.
- .TP
--export pcs\-commands|pcs\-commands\-verbose output=<filename>
--Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.
-+export pcs\-commands|pcs\-commands\-verbose output=<filename> [dist=<dist>]
-+Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file or written to stdout if 'output' is not specified.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.  Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  If 'dist' is not specified, it defaults to this node's version.
- .SS "pcsd"
- .TP
- certkey <certificate file> <key file>
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 0605cd7..0474324 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1173,7 +1173,7 @@ Commands:
-         Restore cluster configuration to specified checkpoint.
- 
-     import-cman output=<filename> [input=<filename>] [--interactive]
--            [output-format=corosync.conf|cluster.conf]
-+            [output-format=corosync.conf|cluster.conf] [dist=<dist>]
-         Converts CMAN cluster configuration to Pacemaker cluster configuration.
-         Converted configuration will be saved to 'output' file.  To send
-         the configuration to the cluster nodes the 'pcs config restore'
-@@ -1181,20 +1181,27 @@ Commands:
-         prompted to solve incompatibilities manually.  If no input is specified
-         /etc/cluster/cluster.conf will be used.  You can force to create output
-         containing either cluster.conf or corosync.conf using the output-format
--        option.
-+        option.  Optionally you can specify output version by setting 'dist'
-+        option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.
-+        If 'dist' is not specified, it defaults to this nodei's version if that
-+        matches output-format, otherwise redhat,6.7 is used for cluster.conf
-+        and redhat,7.1 is used for corosync.conf.
- 
-     import-cman output=<filename> [input=<filename>] [--interactive]
--            output-format=pcs-commands|pcs-commands-verbose
-+            output-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
-         Converts CMAN cluster configuration to a list of pcs commands which
-         recreates the same cluster as Pacemaker cluster when executed.  Commands
-         will be saved to 'output' file.  For other options see above.
- 
--    export pcs-commands|pcs-commands-verbose output=<filename>
-+    export pcs-commands|pcs-commands-verbose [output=<filename>] [dist=<dist>]
-         Creates a list of pcs commands which upon execution recreates
-         the current cluster running on this node.  Commands will be saved
--        to 'output' file.  Use pcs-commands to get a simple list of commands,
--        whereas pcs-commands-verbose creates a list including comments and debug
--        messages.
-+        to 'output' file or written to stdout if 'output' is not specified.  Use
-+        pcs-commands to get a simple list of commands, whereas
-+        pcs-commands-verbose creates a list including comments and debug
-+        messages.  Optionally specify output version by setting 'dist' option
-+        e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  If 'dist'
-+        is not specified, it defaults to this node's version.
- """
-     if pout:
-         print(sub_usage(args, output))
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1357945-02-doc-fixes-regarding-clufter.patch b/SOURCES/bz1357945-02-doc-fixes-regarding-clufter.patch
deleted file mode 100644
index aad1e01..0000000
--- a/SOURCES/bz1357945-02-doc-fixes-regarding-clufter.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From 4d8b5102f6504b2faf8219e799244f37dde2db10 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Wed, 3 Aug 2016 14:55:31 +0200
-Subject: [PATCH] doc: fixes regarding clufter
-
----
- pcs/pcs.8    |  8 ++++----
- pcs/usage.py | 15 +++++++++------
- 2 files changed, 13 insertions(+), 10 deletions(-)
-
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 0e8e967..09c0235 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -625,13 +625,13 @@ checkpoint restore <checkpoint_number>
- Restore cluster configuration to specified checkpoint.
- .TP
- import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>]
--Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format option.  Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf.
-+Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format option.  Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf.
- .TP
- import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
- Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed.  Commands will be saved to 'output' file.  For other options see above.
- .TP
--export pcs\-commands|pcs\-commands\-verbose output=<filename> [dist=<dist>]
--Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file or written to stdout if 'output' is not specified.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.  Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  If 'dist' is not specified, it defaults to this node's version.
-+export pcs\-commands|pcs\-commands\-verbose [output=<filename>] [dist=<dist>]
-+Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file or written to stdout if 'output' is not specified.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.  Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version.
- .SS "pcsd"
- .TP
- certkey <certificate file> <key file>
-@@ -644,7 +644,7 @@ clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR]
- Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa.  After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth').  Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance.
- .SS "node"
- .TP
--attribute [[<node>] [\fB\-\-name\fR <attr>] | <node> <name>=<value> ...]
-+attribute [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...]
- Manage node attributes.  If no parameters are specified, show attributes of all nodes.  If one parameter is specified, show attributes of specified node.  If \fB\-\-name\fR is specified, show specified attribute's value from all nodes.  If more parameters are specified, set attributes of specified node.  Attributes can be removed by setting an attribute without a value.
- .TP
- maintenance [\fB\-\-all\fR] | [<node>]...
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 7cfb33e..ef60b64 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -1182,10 +1182,11 @@ Commands:
-         /etc/cluster/cluster.conf will be used.  You can force to create output
-         containing either cluster.conf or corosync.conf using the output-format
-         option.  Optionally you can specify output version by setting 'dist'
--        option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.
--        If 'dist' is not specified, it defaults to this nodei's version if that
--        matches output-format, otherwise redhat,6.7 is used for cluster.conf
--        and redhat,7.1 is used for corosync.conf.
-+        option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You
-+        can get the list of supported dist values by running the "clufter
-+        --list-dists" command.  If 'dist' is not specified, it defaults to this
-+        node's version if that matches output-format, otherwise redhat,6.7 is
-+        used for cluster.conf and redhat,7.1 is used for corosync.conf.
- 
-     import-cman output=<filename> [input=<filename>] [--interactive]
-             output-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
-@@ -1200,8 +1201,10 @@ Commands:
-         pcs-commands to get a simple list of commands, whereas
-         pcs-commands-verbose creates a list including comments and debug
-         messages.  Optionally specify output version by setting 'dist' option
--        e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  If 'dist'
--        is not specified, it defaults to this node's version.
-+        e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get
-+        the list of supported dist values by running the "clufter --list-dists"
-+        command.  If 'dist' is not specified, it defaults to this node's
-+        version.
- """
-     if pout:
-         print(sub_usage(args, output))
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch b/SOURCES/bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch
deleted file mode 100644
index 7acb761..0000000
--- a/SOURCES/bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-From 54e03344d1d10b66bb0aad92bf072c283ec07185 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 26 Jul 2016 13:44:09 +0200
-Subject: [PATCH] fix exceptions when authenticating cluster nodes
-
----
- pcsd/pcs.rb  | 70 ++++++++++++++++++++++++++++++------------------------------
- pcsd/pcsd.rb | 18 ++++++++++++++--
- 2 files changed, 51 insertions(+), 37 deletions(-)
-
-diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
-index 0956de9..ad54a75 100644
---- a/pcsd/pcs.rb
-+++ b/pcsd/pcs.rb
-@@ -395,47 +395,47 @@ end
- 
- def send_request(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, cookies_data=nil)
-   cookies_data = {} if not cookies_data
--  begin
--    request = "/#{request}" if not request.start_with?("/")
-+  request = "/#{request}" if not request.start_with?("/")
- 
--    # fix ipv6 address for URI.parse
--    node6 = node
--    if (node.include?(":") and ! node.start_with?("["))
--      node6 = "[#{node}]"
--    end
-+  # fix ipv6 address for URI.parse
-+  node6 = node
-+  if (node.include?(":") and ! node.start_with?("["))
-+    node6 = "[#{node}]"
-+  end
- 
--    if remote
--      uri = URI.parse("https://#{node6}:2224/remote" + request)
--    else
--      uri = URI.parse("https://#{node6}:2224" + request)
--    end
-+  if remote
-+    uri = URI.parse("https://#{node6}:2224/remote" + request)
-+  else
-+    uri = URI.parse("https://#{node6}:2224" + request)
-+  end
- 
--    if post
--      req = Net::HTTP::Post.new(uri.path)
--      raw_data ? req.body = raw_data : req.set_form_data(data)
--    else
--      req = Net::HTTP::Get.new(uri.path)
--      req.set_form_data(data)
--    end
-+  if post
-+    req = Net::HTTP::Post.new(uri.path)
-+    raw_data ? req.body = raw_data : req.set_form_data(data)
-+  else
-+    req = Net::HTTP::Get.new(uri.path)
-+    req.set_form_data(data)
-+  end
- 
--    cookies_to_send = []
--    cookies_data_default = {}
--    # Let's be safe about characters in cookie variables and do base64.
--    # We cannot do it for CIB_user however to be backward compatible
--    # so we at least remove disallowed characters.
--    cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
--      auth_user[:username].to_s
--    )
--    cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
--      (auth_user[:usergroups] || []).join(' ')
--    )
-+  cookies_to_send = []
-+  cookies_data_default = {}
-+  # Let's be safe about characters in cookie variables and do base64.
-+  # We cannot do it for CIB_user however to be backward compatible
-+  # so we at least remove disallowed characters.
-+  cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
-+    auth_user[:username].to_s
-+  )
-+  cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
-+    (auth_user[:usergroups] || []).join(' ')
-+  )
- 
--    cookies_data_default.update(cookies_data)
--    cookies_data_default.each { |name, value|
--      cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s
--    }
--    req.add_field('Cookie', cookies_to_send.join(';'))
-+  cookies_data_default.update(cookies_data)
-+  cookies_data_default.each { |name, value|
-+    cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s
-+  }
-+  req.add_field('Cookie', cookies_to_send.join(';'))
- 
-+  begin
-     # uri.host returns "[addr]" for ipv6 addresses, which is wrong
-     # uri.hostname returns "addr" for ipv6 addresses, which is correct, but it
-     #   is not available in older ruby versions
-diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
-index d3032cf..287cf03 100644
---- a/pcsd/pcsd.rb
-+++ b/pcsd/pcsd.rb
-@@ -75,6 +75,7 @@ if development?
- end
- 
- before do
-+  # nobody is logged in yet
-   @auth_user = nil
- 
-   # get session storage instance from env
-@@ -83,8 +84,21 @@ before do
-     $session_storage_env = env
-   end
- 
--  if request.path != '/login' and not request.path == "/logout" and not request.path == '/remote/auth' and not request.path == '/login-status'
--    protected! 
-+  # urls which are accesible for everybody including not logged in users
-+  always_accessible = [
-+    '/login',
-+    '/logout',
-+    '/login-status',
-+    '/remote/auth',
-+  ]
-+  if not always_accessible.include?(request.path)
-+    # Sets @auth_user to a hash containing info about logged in user or halts
-+    # the request processing if login credentials are incorrect.
-+    protected!
-+  else
-+    # Set a sane default: nobody is logged in, but we do not need to check both
-+    # for nil and empty username (if auth_user and auth_user[:username])
-+    @auth_user = {} if not @auth_user
-   end
-   $cluster_name = get_cluster_name()
- end
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch b/SOURCES/bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch
deleted file mode 100644
index 5ef6de1..0000000
--- a/SOURCES/bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From 25413c28853e1d350982feba7e306e05b6e74f49 Mon Sep 17 00:00:00 2001
-From: Ondrej Mular <omular@redhat.com>
-Date: Sun, 14 Aug 2016 13:38:52 +0200
-Subject: [PATCH] web UI: fix bad using of HTML ids
-
----
- pcsd/public/js/nodes-ember.js |  5 +++++
- pcsd/public/js/pcsd.js        | 11 ++++++++---
- pcsd/views/main.erb           |  4 ++--
- 3 files changed, 15 insertions(+), 5 deletions(-)
-
-diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
-index c51a341..f176c39 100644
---- a/pcsd/public/js/nodes-ember.js
-+++ b/pcsd/public/js/nodes-ember.js
-@@ -823,6 +823,11 @@ Pcs.ResourceObj = Ember.Object.extend({
-   id: null,
-   _id: Ember.computed.alias('id'),
-   name: Ember.computed.alias('id'),
-+  treeview_element_id: function() {
-+    if (this.get("id")) {
-+      return this.get("id") + "-treeview-element";
-+    }
-+  }.property("id"),
-   parent: null,
-   meta_attr: [],
-   meta_attributes: Ember.computed.alias('meta_attr'),
-diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
-index 56219d4..1060bd3 100644
---- a/pcsd/public/js/pcsd.js
-+++ b/pcsd/public/js/pcsd.js
-@@ -2077,8 +2077,13 @@ function fix_auth_of_cluster() {
-   });
- }
- 
--function get_tree_view_element_id(element) {
--  return $(element).parents('table.tree-element')[0].id;
-+function get_tree_view_resource_id(element) {
-+  var suffix = '-treeview-element';
-+  var element_id = $(element).parents('table.tree-element')[0].id;
-+  if (element_id && element_id.endsWith(suffix)) {
-+    return element_id.substr(0, element_id.lastIndexOf(suffix));
-+  }
-+  return null;
- }
- 
- function get_list_view_element_id(element) {
-@@ -2166,7 +2171,7 @@ function tree_view_onclick(resource_id) {
- }
- 
- function tree_view_select(element_id) {
--  var e = $('#' + element_id);
-+  var e = $(`#${element_id}-treeview-element`);
-   var view = e.parents('table.tree-view');
-   view.find('div.arrow').hide();
-   view.find('tr.children').hide();
-diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
-index 64fe560..8de1c60 100644
---- a/pcsd/views/main.erb
-+++ b/pcsd/views/main.erb
-@@ -99,8 +99,8 @@
-   </script>
- 
-   <script type="text/x-handlebars" data-template-name="components/resource-tree-element">
--    <table class="tree-element"  cellpadding="0" cellspacing="0" style="width: 100%; border: none;" {{bind-attr id=node._id}}>
--    <tr class="tree-element-name" onclick="tree_view_onclick(get_tree_view_element_id(this));" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" {{bind-attr nodeID=node.id}}>
-+    <table class="tree-element"  cellpadding="0" cellspacing="0" style="width: 100%; border: none;" {{bind-attr id=node.treeview_element_id}}>
-+    <tr class="tree-element-name" onclick="tree_view_onclick(get_tree_view_resource_id(this));" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" {{bind-attr nodeID=node.id}}>
-         <td style="width:20px;" class="node_list_check">
-           <input type="checkbox" onchange="tree_view_checkbox_onchange(this)">
-         </td>
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1373614-01-return-1-when-pcsd-is-unable-to-bind.patch b/SOURCES/bz1373614-01-return-1-when-pcsd-is-unable-to-bind.patch
new file mode 100644
index 0000000..b868744
--- /dev/null
+++ b/SOURCES/bz1373614-01-return-1-when-pcsd-is-unable-to-bind.patch
@@ -0,0 +1,27 @@
+From 1cd8a8c7dbe3b5728caf68b6659fc59fe5b3031f Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Wed, 31 May 2017 11:58:19 +0200
+Subject: [PATCH] return 1 when pcsd is unable to bind
+
+---
+ pcsd/ssl.rb | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
+index 24ee059..1a41ab2 100644
+--- a/pcsd/ssl.rb
++++ b/pcsd/ssl.rb
+@@ -99,8 +99,10 @@ def run_server(server, webrick_options, secondary_addrs)
+   rescue Errno::EADDRNOTAVAIL, Errno::EADDRINUSE => e
+     $logger.error 'Unable to bind to specified address(es), exiting'
+     $logger.error e.message
++    exit 1
+   rescue SocketError => e
+     $logger.error e.message
++    exit 1
+   end
+ end
+ 
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1386114-01-fix-a-crash-in-adding-a-remote-node.patch b/SOURCES/bz1386114-01-fix-a-crash-in-adding-a-remote-node.patch
new file mode 100644
index 0000000..0a499d9
--- /dev/null
+++ b/SOURCES/bz1386114-01-fix-a-crash-in-adding-a-remote-node.patch
@@ -0,0 +1,26 @@
+From a551c1f4b57cb678a9251d6fc7050a7970df3906 Mon Sep 17 00:00:00 2001
+From: Tomas Jelinek <tojeline@redhat.com>
+Date: Wed, 31 May 2017 07:50:59 +0200
+Subject: [PATCH] fix a crash in adding a remote node
+
+... when an id conflict occurs
+---
+ pcs/lib/commands/cluster.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
+index 7386e3c..0bafef5 100644
+--- a/pcs/lib/commands/cluster.py
++++ b/pcs/lib/commands/cluster.py
+@@ -189,7 +189,7 @@ def node_add_remote(
+         for report in report_list + list(e.args):
+             if report.code != report_codes.ID_ALREADY_EXISTS:
+                 unified_report_list.append(report)
+-            elif report.info.get["id"] not in already_exists:
++            elif report.info["id"] not in already_exists:
+                 unified_report_list.append(report)
+                 already_exists.append(report.info["id"])
+         report_list = unified_report_list
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1386114-02-deal-with-f-corosync_conf-if-create-remote-res.patch b/SOURCES/bz1386114-02-deal-with-f-corosync_conf-if-create-remote-res.patch
new file mode 100644
index 0000000..76e28e8
--- /dev/null
+++ b/SOURCES/bz1386114-02-deal-with-f-corosync_conf-if-create-remote-res.patch
@@ -0,0 +1,270 @@
+From 698f63f743aa970b4af977e4a410e64ee7013fa4 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Tue, 30 May 2017 17:00:41 +0200
+Subject: [PATCH] deal with -f/--corosync_conf if create remote res.
+
+Do not request corosync.conf when `pcs resource create` with -f is used
+for create remote (ocf:pacemaker:remote) or guest (meta remote-node).
+---
+ pcs/cli/common/lib_wrapper.py |  6 ++-
+ pcs/lib/commands/resource.py  | 90 ++++++++++++++++++++++++++++---------------
+ pcs/resource.py               | 13 +++++--
+ 3 files changed, 74 insertions(+), 35 deletions(-)
+
+diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
+index 683ba4d..4d6ed9a 100644
+--- a/pcs/cli/common/lib_wrapper.py
++++ b/pcs/cli/common/lib_wrapper.py
+@@ -318,7 +318,8 @@ def load_module(env, middleware_factory, name):
+         return bind_all(
+             env,
+             middleware.build(
+-                middleware_factory.cib
++                middleware_factory.cib,
++                middleware_factory.corosync_conf_existing,
+             ),
+             {
+                 "bundle_create": resource.bundle_create,
+@@ -338,7 +339,8 @@ def load_module(env, middleware_factory, name):
+         return bind_all(
+             env,
+             middleware.build(
+-                middleware_factory.cib
++                middleware_factory.cib,
++                middleware_factory.corosync_conf_existing,
+             ),
+             {
+                 "create": stonith.create,
+diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
+index a9f8271..3a060b8 100644
+--- a/pcs/lib/commands/resource.py
++++ b/pcs/lib/commands/resource.py
+@@ -46,8 +46,12 @@ def resource_environment(
+         ])
+ 
+ def _validate_remote_connection(
+-    nodes, resource_id, instance_attributes,  allow_not_suitable_command
++    resource_agent, nodes_to_validate_against, resource_id, instance_attributes,
++    allow_not_suitable_command
+ ):
++    if resource_agent.get_name() != remote_node.AGENT_NAME.full_name:
++        return []
++
+     report_list = []
+     report_list.append(
+         reports.get_problem_creator(
+@@ -58,7 +62,7 @@ def _validate_remote_connection(
+ 
+     report_list.extend(
+         remote_node.validate_host_not_conflicts(
+-            nodes,
++            nodes_to_validate_against,
+             resource_id,
+             instance_attributes
+         )
+@@ -66,8 +70,8 @@ def _validate_remote_connection(
+     return report_list
+ 
+ def _validate_guest_change(
+-    tree, nodes, meta_attributes, allow_not_suitable_command,
+-    detect_remove=False
++    tree, nodes_to_validate_against, meta_attributes,
++    allow_not_suitable_command, detect_remove=False
+ ):
+     if not guest_node.is_node_name_in_options(meta_attributes):
+         return []
+@@ -89,7 +93,7 @@ def _validate_guest_change(
+     report_list.extend(
+         guest_node.validate_conflicts(
+             tree,
+-            nodes,
++            nodes_to_validate_against,
+             node_name,
+             meta_attributes
+         )
+@@ -97,28 +101,54 @@ def _validate_guest_change(
+ 
+     return report_list
+ 
+-def _validate_special_cases(
+-    nodes, resource_agent, resources_section, resource_id, meta_attributes,
++def _get_nodes_to_validate_against(env, tree):
++    if not env.is_corosync_conf_live and env.is_cib_live:
++        raise LibraryError(
++            reports.live_environment_required(["COROSYNC_CONF"])
++        )
++
++    if not env.is_cib_live and env.is_corosync_conf_live:
++        #we do not try to get corosync.conf from live cluster when cib is not
++        #taken from live cluster
++        return get_nodes(tree=tree)
++
++    return get_nodes(env.get_corosync_conf(), tree)
++
++
++def _check_special_cases(
++    env, resource_agent, resources_section, resource_id, meta_attributes,
+     instance_attributes, allow_not_suitable_command
+ ):
+-    report_list = []
+-
+-    if resource_agent.get_name() == remote_node.AGENT_NAME.full_name:
+-        report_list.extend(_validate_remote_connection(
+-            nodes,
+-            resource_id,
+-            instance_attributes,
+-            allow_not_suitable_command,
+-        ))
++    if(
++        resource_agent.get_name() != remote_node.AGENT_NAME.full_name
++        and
++        not guest_node.is_node_name_in_options(meta_attributes)
++    ):
++        #if no special case happens we won't take care about corosync.conf that
++        #is needed for getting nodes to validate against
++        return
++
++    nodes_to_validate_against = _get_nodes_to_validate_against(
++        env,
++        resources_section
++    )
+ 
++    report_list = []
++    report_list.extend(_validate_remote_connection(
++        resource_agent,
++        nodes_to_validate_against,
++        resource_id,
++        instance_attributes,
++        allow_not_suitable_command,
++    ))
+     report_list.extend(_validate_guest_change(
+         resources_section,
+-        nodes,
++        nodes_to_validate_against,
+         meta_attributes,
+         allow_not_suitable_command,
+     ))
+ 
+-    return report_list
++    env.report_processor.process_list(report_list)
+ 
+ def create(
+     env, resource_id, resource_agent_name,
+@@ -167,15 +197,15 @@ def create(
+         [resource_id],
+         ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+     ) as resources_section:
+-        env.report_processor.process_list(_validate_special_cases(
+-            get_nodes(env.get_corosync_conf(), resources_section),
++        _check_special_cases(
++            env,
+             resource_agent,
+             resources_section,
+             resource_id,
+             meta_attributes,
+             instance_attributes,
+             allow_not_suitable_command
+-        ))
++        )
+ 
+         primitive_element = resource.primitive.create(
+             env.report_processor, resources_section,
+@@ -247,15 +277,15 @@ def _create_as_clone_common(
+             resource.common.is_clone_deactivated_by_meta(clone_meta_options)
+         )
+     ) as resources_section:
+-        env.report_processor.process_list(_validate_special_cases(
+-            get_nodes(env.get_corosync_conf(), resources_section),
++        _check_special_cases(
++            env,
+             resource_agent,
+             resources_section,
+             resource_id,
+             meta_attributes,
+             instance_attributes,
+             allow_not_suitable_command
+-        ))
++        )
+ 
+         primitive_element = resource.primitive.create(
+             env.report_processor, resources_section,
+@@ -325,15 +355,15 @@ def create_in_group(
+         [resource_id],
+         ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+     ) as resources_section:
+-        env.report_processor.process_list(_validate_special_cases(
+-            get_nodes(env.get_corosync_conf(), resources_section),
++        _check_special_cases(
++            env,
+             resource_agent,
+             resources_section,
+             resource_id,
+             meta_attributes,
+             instance_attributes,
+             allow_not_suitable_command
+-        ))
++        )
+ 
+         primitive_element = resource.primitive.create(
+             env.report_processor, resources_section,
+@@ -406,15 +436,15 @@ def create_into_bundle(
+         disabled_after_wait=ensure_disabled,
+         required_cib_version=(2, 8, 0)
+     ) as resources_section:
+-        env.report_processor.process_list(_validate_special_cases(
+-            get_nodes(env.get_corosync_conf(), resources_section),
++        _check_special_cases(
++            env,
+             resource_agent,
+             resources_section,
+             resource_id,
+             meta_attributes,
+             instance_attributes,
+             allow_not_suitable_command
+-        ))
++        )
+ 
+         primitive_element = resource.primitive.create(
+             env.report_processor, resources_section,
+diff --git a/pcs/resource.py b/pcs/resource.py
+index 4d5f43a..dc6da13 100644
+--- a/pcs/resource.py
++++ b/pcs/resource.py
+@@ -28,24 +28,31 @@ from pcs.cli.resource.parse_args import (
+     parse_bundle_update_options,
+     parse_create as parse_create_args,
+ )
+-from pcs.lib.env_tools import get_nodes
+ from pcs.lib.errors import LibraryError
++from pcs.lib.cib.resource import guest_node
+ import pcs.lib.pacemaker.live as lib_pacemaker
+ from pcs.lib.pacemaker.values import timeout_to_seconds
+ import pcs.lib.resource_agent as lib_ra
+ from pcs.cli.common.console_report import error, warn
+-from pcs.lib.commands.resource import _validate_guest_change
++from pcs.lib.commands.resource import(
++    _validate_guest_change,
++    _get_nodes_to_validate_against,
++)
+ 
+ 
+ RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
+ 
+ def _detect_guest_change(meta_attributes, allow_not_suitable_command):
++    if not guest_node.is_node_name_in_options(meta_attributes):
++        return
++
+     env = utils.get_lib_env()
+     cib = env.get_cib()
++    nodes_to_validate_against = _get_nodes_to_validate_against(env, cib)
+     env.report_processor.process_list(
+         _validate_guest_change(
+             cib,
+-            get_nodes(env.get_corosync_conf(), cib),
++            nodes_to_validate_against,
+             meta_attributes,
+             allow_not_suitable_command,
+             detect_remove=True,
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1404233-01-cluster-cib-push-allows-to-obtain-and-push-a-diff.patch b/SOURCES/bz1404233-01-cluster-cib-push-allows-to-obtain-and-push-a-diff.patch
deleted file mode 100644
index b9649dc..0000000
--- a/SOURCES/bz1404233-01-cluster-cib-push-allows-to-obtain-and-push-a-diff.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-From 5cd73a40d15e1ef692130e0a85e2f2e411bd8b66 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 6 Jan 2017 15:26:11 +0100
-Subject: [PATCH] 'cluster cib-push' allows to obtain and push a diff
-
----
- pcs/cluster.py | 43 +++++++++++++++++++++++++++++++++++++------
- pcs/pcs.8      | 10 ++++++++--
- pcs/usage.py   | 14 +++++++++++---
- 3 files changed, 56 insertions(+), 11 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 4572643..69e2852 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1192,6 +1192,8 @@ def cluster_push(argv):
-     filename = None
-     scope = None
-     timeout = None
-+    diff_against = None
-+
-     if "--wait" in utils.pcs_options:
-         timeout = utils.validate_wait_get_timeout()
-     for arg in argv:
-@@ -1204,6 +1206,8 @@ def cluster_push(argv):
-                     utils.err("invalid CIB scope '%s'" % arg_value)
-                 else:
-                     scope = arg_value
-+            if arg_name == "diff-against":
-+                diff_against = arg_value
-             else:
-                 usage.cluster(["cib-push"])
-                 sys.exit(1)
-@@ -1212,6 +1216,8 @@ def cluster_push(argv):
-     if not filename:
-         usage.cluster(["cib-push"])
-         sys.exit(1)
-+    if diff_against and scope:
-+        utils.err("Cannot use both scope and diff-against")
- 
-     try:
-         new_cib_dom = xml.dom.minidom.parse(filename)
-@@ -1223,13 +1229,38 @@ def cluster_push(argv):
-     except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
-         utils.err("unable to parse new cib: %s" % e)
- 
--    command = ["cibadmin", "--replace", "--xml-file", filename]
--    if scope:
--        command.append("--scope=%s" % scope)
--    output, retval = utils.run(command)
--    if retval != 0:
--        utils.err("unable to push cib\n" + output)
-+    if diff_against:
-+        try:
-+            xml.dom.minidom.parse(diff_against)
-+        except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
-+            utils.err("unable to parse original cib: %s" % e)
-+        runner = utils.cmd_runner()
-+        command = [
-+            "crm_diff", "--original", diff_against, "--new", filename,
-+            "--no-version"
-+        ]
-+        patch, error, dummy_retval = runner.run(command)
-+        # dummy_retval == -1 means one of two things:
-+        # a) an error has occured
-+        # b) --original and --new differ
-+        if error.strip():
-+            utils.err("unable to diff the CIBs:\n" + error)
-+
-+        command = ["cibadmin", "--patch", "--xml-pipe"]
-+        output, error, retval = runner.run(command, patch)
-+        if retval != 0:
-+            utils.err("unable to push cib\n" + error + output)
-+
-+    else:
-+        command = ["cibadmin", "--replace", "--xml-file", filename]
-+        if scope:
-+            command.append("--scope=%s" % scope)
-+        output, retval = utils.run(command)
-+        if retval != 0:
-+            utils.err("unable to push cib\n" + output)
-+
-     print("CIB updated")
-+
-     if "--wait" not in utils.pcs_options:
-         return
-     cmd = ["crm_resource", "--wait"]
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index dffaddd..4062ce3 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -262,8 +262,14 @@ Sync corosync configuration to all nodes found from current corosync.conf file (
- cib [filename] [scope=<scope> | \fB\-\-config\fR]
- Get the raw xml from the CIB (Cluster Information Base).  If a filename is provided, we save the CIB to that file, otherwise the CIB is printed.  Specify scope to get a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status.  \fB\-\-config\fR is the same as scope=configuration.  Do not specify a scope if you want to edit the saved CIB using pcs (pcs -f <command>).
- .TP
--cib-push <filename> [scope=<scope> | \fB\-\-config\fR] [\fB\-\-wait\fR[=<n>]]
--Push the raw xml from <filename> to the CIB (Cluster Information Base).  You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push.  Specify scope to push a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to push the whole CIB or be warned in the case of outdated CIB.  If --wait is specified wait up to 'n' seconds for changes to be applied.  WARNING: the selected scope of the CIB will be overwritten by the current content of the specified file.
-+cib-push <filename> [diff\-against=<filename_original> | scope=<scope> | \fB\-\-config\fR] [\fB\-\-wait\fR[=<n>]]
-+Push the raw xml from <filename> to the CIB (Cluster Information Base).  You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one\-off push.  If diff\-against is specified, pcs diffs contents of filename against contents of filename_original and pushes the result to the CIB.  Specify scope to push a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to push the whole CIB or be warned in the case of outdated CIB.  If --wait is specified wait up to 'n' seconds for changes to be applied.  WARNING: the selected scope of the CIB will be overwritten by the current content of the specified file.
-+
-+Example:
-+    pcs cluster cib > original.xml
-+    cp original.xml new.xml
-+    pcs -f new.xml constraint location apache prefers node2
-+    pcs cluster cib-push new.xml diff-against=original.xml
- .TP
- cib\-upgrade
- Upgrade the CIB to conform to the latest version of the document schema.
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 0ebebe0..b33da35 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -656,20 +656,28 @@ Commands:
-         scope=configuration.  Do not specify a scope if you want to edit
-         the saved CIB using pcs (pcs -f <command>).
- 
--    cib-push <filename> [scope=<scope> | --config] [--wait[=<n>]]
-+    cib-push <filename> [--wait[=<n>]]
-+            [diff-against=<filename_orignal> | scope=<scope> | --config]
-         Push the raw xml from <filename> to the CIB (Cluster Information Base).
-         You can obtain the CIB by running the 'pcs cluster cib' command, which
-         is recommended first step when you want to perform desired
-         modifications (pcs -f <command>) for the one-off push.
-+        If diff-against is specified, pcs diffs contents of filename against
-+        contents of filename_original and pushes the result to the CIB.
-         Specify scope to push a specific section of the CIB.  Valid values
-         of the scope are: configuration, nodes, resources, constraints,
-         crm_config, rsc_defaults, op_defaults.  --config is the same as
-         scope=configuration.  Use of --config is recommended.  Do not specify
-         a scope if you need to push the whole CIB or be warned in the case
--        of outdated CIB. If --wait is specified wait up to 'n' seconds for
--        changes to be applied.
-+        of outdated CIB.
-+        If --wait is specified wait up to 'n' seconds for changes to be applied.
-         WARNING: the selected scope of the CIB will be overwritten by the
-         current content of the specified file.
-+        Example:
-+            pcs cluster cib > original.xml
-+            cp original.xml new.xml
-+            pcs -f new.xml constraint location apache prefers node2
-+            pcs cluster cib-push new.xml diff-against=original.xml
- 
-     cib-upgrade
-         Upgrade the CIB to conform to the latest version of the document schema.
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1408476-01-accept-RA-with-instantiated-systemd-service-in-name.patch b/SOURCES/bz1408476-01-accept-RA-with-instantiated-systemd-service-in-name.patch
deleted file mode 100644
index c3020b3..0000000
--- a/SOURCES/bz1408476-01-accept-RA-with-instantiated-systemd-service-in-name.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From efd4958f35d7760bd4d80c4d82e257708c25416d Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Fri, 6 Jan 2017 09:06:35 +0100
-Subject: [PATCH] accept RA with instantiated systemd serv. in name
-
-Accept resource agent with instantiated systemd service in name.
----
- pcs/utils.py | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/pcs/utils.py b/pcs/utils.py
-index d5b6dcf..6daecf3 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -1524,6 +1524,12 @@ def is_valid_resource(resource, caseInsensitiveCheck=False):
-         return is_file_abs_path(agent)
-     elif resource.startswith("systemd:"):
-         _, agent_name = resource.split(":", 1)
-+        # For Instantiated services we need to make sure that the
-+        # <agent_name>@.service file exists
-+        # For example: we need recognize systemd:getty@tty3 as existing service.
-+        if '@' in agent_name:
-+            agent_name, instance_name = agent_name.split("@", 1)
-+            agent_name += '@'
-         agent1 = os.path.join(
-             "/etc/systemd/system/", agent_name + ".service"
-         )
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1420757-01-fix-pcs-cluster-cib-push-scope.patch b/SOURCES/bz1420757-01-fix-pcs-cluster-cib-push-scope.patch
deleted file mode 100644
index 64a7edc..0000000
--- a/SOURCES/bz1420757-01-fix-pcs-cluster-cib-push-scope.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 0d63dd38a152f6edc852b209ee82e00f31cc7a70 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 7 Feb 2017 17:50:51 +0100
-Subject: [PATCH] fix "pcs cluster cib-push scope="
-
----
- pcs/cluster.py | 10 ++++++----
- 1 file changed, 6 insertions(+), 4 deletions(-)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 69e2852..32ea8c3 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1201,23 +1201,25 @@ def cluster_push(argv):
-             filename = arg
-         else:
-             arg_name, arg_value = arg.split("=", 1)
--            if arg_name == "scope" and "--config" not in utils.pcs_options:
-+            if arg_name == "scope":
-+                if "--config" in utils.pcs_options:
-+                    utils.err("Cannot use both scope and --config")
-                 if not utils.is_valid_cib_scope(arg_value):
-                     utils.err("invalid CIB scope '%s'" % arg_value)
-                 else:
-                     scope = arg_value
--            if arg_name == "diff-against":
-+            elif arg_name == "diff-against":
-                 diff_against = arg_value
-             else:
-                 usage.cluster(["cib-push"])
-                 sys.exit(1)
-     if "--config" in utils.pcs_options:
-         scope = "configuration"
-+    if diff_against and scope:
-+        utils.err("Cannot use both scope and diff-against")
-     if not filename:
-         usage.cluster(["cib-push"])
-         sys.exit(1)
--    if diff_against and scope:
--        utils.err("Cannot use both scope and diff-against")
- 
-     try:
-         new_cib_dom = xml.dom.minidom.parse(filename)
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1420757-02-fix-cib-push-diff-against-when-the-diff-is-empty.patch b/SOURCES/bz1420757-02-fix-cib-push-diff-against-when-the-diff-is-empty.patch
deleted file mode 100644
index 91f4eb8..0000000
--- a/SOURCES/bz1420757-02-fix-cib-push-diff-against-when-the-diff-is-empty.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From 0fb8059bc401c5797ec7c1c46a8aed5ad6b63249 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 14 Feb 2017 09:19:15 +0100
-Subject: [PATCH 3/3] fix 'cib-push diff-against=' when the diff is empty
-
----
- pcs/cluster.py | 5 +++++
- 1 file changed, 5 insertions(+)
-
-diff --git a/pcs/cluster.py b/pcs/cluster.py
-index 32ea8c3..1af1ebf 100644
---- a/pcs/cluster.py
-+++ b/pcs/cluster.py
-@@ -1245,8 +1245,13 @@ def cluster_push(argv):
-         # dummy_retval == -1 means one of two things:
-         # a) an error has occured
-         # b) --original and --new differ
-+        # therefore it's of no use to see if an error occurred
-         if error.strip():
-             utils.err("unable to diff the CIBs:\n" + error)
-+        if not patch.strip():
-+            utils.err(
-+                "The new CIB is the same as the original CIB, nothing to push."
-+            )
- 
-         command = ["cibadmin", "--patch", "--xml-pipe"]
-         output, error, retval = runner.run(command, patch)
--- 
-1.8.3.1
-
diff --git a/SOURCES/bz1433016-02-make-container-type-mandatory-in-bundle-create.patch b/SOURCES/bz1433016-02-make-container-type-mandatory-in-bundle-create.patch
new file mode 100644
index 0000000..a9e2ee7
--- /dev/null
+++ b/SOURCES/bz1433016-02-make-container-type-mandatory-in-bundle-create.patch
@@ -0,0 +1,447 @@
+From 8d3bcf972d9f6aa7e568e203f749cc878cd6fd34 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Thu, 15 Jun 2017 11:46:12 +0200
+Subject: [PATCH] make container type mandatory in "bundle create"
+
+---
+ pcs/cli/resource/parse_args.py           |  4 +-
+ pcs/cli/resource/test/test_parse_args.py | 32 +++++++-------
+ pcs/pcs.8                                |  4 +-
+ pcs/test/cib_resource/test_bundle.py     | 75 +++++++++++++++++++-------------
+ pcs/test/cib_resource/test_create.py     |  2 +-
+ pcs/test/test_constraints.py             |  2 +-
+ pcs/test/test_resource.py                |  2 +-
+ pcs/usage.py                             |  3 +-
+ 8 files changed, 67 insertions(+), 57 deletions(-)
+
+diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
+index 366acac..1bdcd5b 100644
+--- a/pcs/cli/resource/parse_args.py
++++ b/pcs/cli/resource/parse_args.py
+@@ -84,7 +84,7 @@ def _parse_bundle_groups(arg_list):
+ def parse_bundle_create_options(arg_list):
+     groups = _parse_bundle_groups(arg_list)
+     container_options = groups.get("container", [])
+-    container_type = None
++    container_type = ""
+     if container_options and "=" not in container_options[0]:
+         container_type = container_options.pop(0)
+     parts = {
+@@ -101,8 +101,6 @@ def parse_bundle_create_options(arg_list):
+         ],
+         "meta": prepare_options(groups.get("meta", []))
+     }
+-    if not parts["container_type"]:
+-        parts["container_type"] = "docker"
+     return parts
+ 
+ def _split_bundle_map_update_op_and_options(
+diff --git a/pcs/cli/resource/test/test_parse_args.py b/pcs/cli/resource/test/test_parse_args.py
+index 0c936cc..791b60d 100644
+--- a/pcs/cli/resource/test/test_parse_args.py
++++ b/pcs/cli/resource/test/test_parse_args.py
+@@ -215,7 +215,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             [],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {},
+                 "network": {},
+                 "port_map": [],
+@@ -229,9 +229,9 @@ class ParseBundleCreateOptions(TestCase):
+ 
+     def test_container_type(self):
+         self.assert_produce(
+-            ["container", "lxc"],
++            ["container", "docker"],
+             {
+-                "container_type": "lxc",
++                "container_type": "docker",
+                 "container": {},
+                 "network": {},
+                 "port_map": [],
+@@ -244,7 +244,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             ["container", "a=b", "c=d"],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {"a": "b", "c": "d"},
+                 "network": {},
+                 "port_map": [],
+@@ -255,9 +255,9 @@ class ParseBundleCreateOptions(TestCase):
+ 
+     def test_container_type_and_options(self):
+         self.assert_produce(
+-            ["container", "lxc", "a=b", "c=d"],
++            ["container", "docker", "a=b", "c=d"],
+             {
+-                "container_type": "lxc",
++                "container_type": "docker",
+                 "container": {"a": "b", "c": "d"},
+                 "network": {},
+                 "port_map": [],
+@@ -279,7 +279,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             ["network", "a=b", "c=d"],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {},
+                 "network": {"a": "b", "c": "d"},
+                 "port_map": [],
+@@ -309,7 +309,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             ["port-map", "a=b", "c=d"],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {},
+                 "network": {},
+                 "port_map": [{"a": "b", "c": "d"}],
+@@ -322,7 +322,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             ["port-map", "a=b", "c=d", "port-map", "e=f"],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {},
+                 "network": {},
+                 "port_map": [{"a": "b", "c": "d"}, {"e": "f"}],
+@@ -349,7 +349,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             ["storage-map", "a=b", "c=d"],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {},
+                 "network": {},
+                 "port_map": [],
+@@ -362,7 +362,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             ["storage-map", "a=b", "c=d", "storage-map", "e=f"],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {},
+                 "network": {},
+                 "port_map": [],
+@@ -381,7 +381,7 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             ["meta", "a=b", "c=d"],
+             {
+-                "container_type": "docker",
++                "container_type": "",
+                 "container": {},
+                 "network": {},
+                 "port_map": [],
+@@ -402,7 +402,7 @@ class ParseBundleCreateOptions(TestCase):
+     def test_all(self):
+         self.assert_produce(
+             [
+-                "container", "lxc", "a=b", "c=d",
++                "container", "docker", "a=b", "c=d",
+                 "network", "e=f", "g=h",
+                 "port-map", "i=j", "k=l",
+                 "port-map", "m=n", "o=p",
+@@ -411,7 +411,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "meta", "y=z", "A=B",
+             ],
+             {
+-                "container_type": "lxc",
++                "container_type": "docker",
+                 "container": {"a": "b", "c": "d"},
+                 "network": {"e": "f", "g": "h"},
+                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
+@@ -427,7 +427,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "meta", "y=z",
+                 "port-map", "i=j", "k=l",
+                 "network", "e=f",
+-                "container", "lxc", "a=b",
++                "container", "docker", "a=b",
+                 "storage-map", "u=v", "w=x",
+                 "port-map", "m=n", "o=p",
+                 "meta", "A=B",
+@@ -435,7 +435,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "container", "c=d",
+             ],
+             {
+-                "container_type": "lxc",
++                "container_type": "docker",
+                 "container": {"a": "b", "c": "d"},
+                 "network": {"e": "f", "g": "h"},
+                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index 20b5c2e..27298a7 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -162,8 +162,8 @@ Remove the clone which contains the specified group or resource (the resource or
+ master [<master/slave id>] <resource id | group id> [options] [\fB\-\-wait\fR[=n]]
+ Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.  Note: to remove a master you must remove the resource/group it contains.
+ .TP
+-bundle create <bundle id> [container [<container type>] <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
+-Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If the container type is not specified, it defaults to 'docker'. If \fB\-\-disabled\fR is specified, the bundle is not started automatically. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
++bundle create <bundle id> container <container type> [<container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
++Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If \fB\-\-disabled\fR is specified, the bundle is not started automatically. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
+ .TP
+ bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [meta <meta options>] [\fB\-\-wait\fR[=n]]
+ Add, remove or change options to specified bundle. If you wish to update a resource encapsulated in the bundle, use the 'pcs resource update' command instead and specify the resource id.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
+diff --git a/pcs/test/cib_resource/test_bundle.py b/pcs/test/cib_resource/test_bundle.py
+index 29e4339..50ea1df 100644
+--- a/pcs/test/cib_resource/test_bundle.py
++++ b/pcs/test/cib_resource/test_bundle.py
+@@ -41,7 +41,7 @@ class BundleCreateUpgradeCib(BundleCreateCommon):
+ 
+     def test_success(self):
+         self.assert_effect(
+-            "resource bundle create B1 container image=pcs:test",
++            "resource bundle create B1 container docker image=pcs:test",
+             """
+                 <resources>
+                     <bundle id="B1">
+@@ -59,7 +59,7 @@ class BundleCreate(BundleCreateCommon):
+ 
+     def test_minimal(self):
+         self.assert_effect(
+-            "resource bundle create B1 container image=pcs:test",
++            "resource bundle create B1 container docker image=pcs:test",
+             """
+                 <resources>
+                     <bundle id="B1">
+@@ -73,7 +73,8 @@ class BundleCreate(BundleCreateCommon):
+         self.assert_effect(
+             """
+                 resource bundle create B1
+-                container replicas=4 replicas-per-host=2 run-command=/bin/true
++                container docker replicas=4 replicas-per-host=2
++                    run-command=/bin/true
+                 port-map port=1001
+                 meta target-role=Stopped
+                 network control-port=12345 host-interface=eth0 host-netmask=24
+@@ -171,15 +172,24 @@ class BundleCreate(BundleCreateCommon):
+             stdout_start="\nUsage: pcs resource bundle create...\n"
+         )
+ 
+-    def test_fail_when_missing_required(self):
++    def test_fail_when_missing_container_type(self):
+         self.assert_pcs_fail_regardless_of_force(
+             "resource bundle create B1",
++            "Error: '' is not a valid container type value, use docker\n"
++        )
++
++    def test_fail_when_missing_required(self):
++        self.assert_pcs_fail_regardless_of_force(
++            "resource bundle create B1 container docker",
+             "Error: required container option 'image' is missing\n"
+         )
+ 
+     def test_fail_on_unknown_option(self):
+         self.assert_pcs_fail(
+-            "resource bundle create B1 container image=pcs:test extra=option",
++            """
++                resource bundle create B1 container docker image=pcs:test
++                extra=option
++            """,
+             "Error: invalid container option 'extra', allowed options are: "
+                 "image, masters, network, options, replicas, replicas-per-host,"
+                 " run-command, use --force to override\n"
+@@ -192,8 +202,8 @@ class BundleCreate(BundleCreateCommon):
+         # supported by pacemaker and so the command fails.
+         self.assert_pcs_fail(
+             """
+-                resource bundle create B1 container image=pcs:test extra=option
+-                --force
++                resource bundle create B1 container docker image=pcs:test
++                extra=option --force
+             """
+             ,
+             stdout_start="Error: Unable to update cib\n"
+@@ -201,7 +211,7 @@ class BundleCreate(BundleCreateCommon):
+ 
+     def test_more_errors(self):
+         self.assert_pcs_fail_regardless_of_force(
+-            "resource bundle create B#1 container replicas=x",
++            "resource bundle create B#1 container docker replicas=x",
+             outdent(
+                 """\
+                 Error: invalid bundle name 'B#1', '#' is not a valid character for a bundle name
+@@ -239,25 +249,25 @@ class BundleUpdate(BundleCreateCommon):
+ 
+     def fixture_bundle(self, name):
+         self.assert_pcs_success(
+-            "resource bundle create {0} container image=pcs:test".format(
++            "resource bundle create {0} container docker image=pcs:test".format(
+                 name
+             )
+         )
+ 
+     def fixture_bundle_complex(self, name):
+         self.assert_pcs_success(
+-            (
+-                "resource bundle create {0} "
+-                "container image=pcs:test replicas=4 masters=2 "
+-                "network control-port=12345 host-interface=eth0 host-netmask=24 "
+-                "port-map internal-port=1000 port=2000 "
+-                "port-map internal-port=1001 port=2001 "
+-                "port-map internal-port=1002 port=2002 "
+-                "storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b "
+-                "storage-map source-dir=/tmp/docker2a target-dir=/tmp/docker2b "
+-                "storage-map source-dir=/tmp/docker3a target-dir=/tmp/docker3b "
+-                "meta priority=15 resource-stickiness=100 is-managed=false "
+-            ).format(name)
++            ("""
++                resource bundle create {0}
++                container docker image=pcs:test replicas=4 masters=2
++                network control-port=12345 host-interface=eth0 host-netmask=24
++                port-map internal-port=1000 port=2000
++                port-map internal-port=1001 port=2001
++                port-map internal-port=1002 port=2002
++                storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
++                storage-map source-dir=/tmp/docker2a target-dir=/tmp/docker2b
++                storage-map source-dir=/tmp/docker3a target-dir=/tmp/docker3b
++                meta priority=15 resource-stickiness=100 is-managed=false
++            """).format(name)
+         )
+ 
+     def test_fail_when_missing_args_1(self):
+@@ -415,7 +425,7 @@ class BundleShow(TestCase, AssertPcsMixin):
+ 
+     def test_minimal(self):
+         self.assert_pcs_success(
+-            "resource bundle create B1 container image=pcs:test"
++            "resource bundle create B1 container docker image=pcs:test"
+         )
+         self.assert_pcs_success("resource show B1", outdent(
+             """\
+@@ -428,7 +438,8 @@ class BundleShow(TestCase, AssertPcsMixin):
+         self.assert_pcs_success(
+             """
+                 resource bundle create B1
+-                container image=pcs:test masters=2 replicas=4 options='a b c'
++                container docker image=pcs:test masters=2 replicas=4
++                    options='a b c'
+             """
+         )
+         self.assert_pcs_success("resource show B1", outdent(
+@@ -442,7 +453,7 @@ class BundleShow(TestCase, AssertPcsMixin):
+         self.assert_pcs_success(
+             """
+                 resource bundle create B1
+-                container image=pcs:test
++                container docker image=pcs:test
+                 network host-interface=eth0 host-netmask=24 control-port=12345
+             """
+         )
+@@ -458,7 +469,7 @@ class BundleShow(TestCase, AssertPcsMixin):
+         self.assert_pcs_success(
+             """
+                 resource bundle create B1
+-                container image=pcs:test
++                container docker image=pcs:test
+                 port-map id=B1-port-map-1001 internal-port=2002 port=2000
+                 port-map range=3000-3300
+             """
+@@ -477,7 +488,7 @@ class BundleShow(TestCase, AssertPcsMixin):
+         self.assert_pcs_success(
+             """
+                 resource bundle create B1
+-                container image=pcs:test
++                container docker image=pcs:test
+                 storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
+                 storage-map id=my-storage-map source-dir=/tmp/docker2a
+                     target-dir=/tmp/docker2b
+@@ -494,9 +505,10 @@ class BundleShow(TestCase, AssertPcsMixin):
+         ))
+ 
+     def test_meta(self):
+-        self.assert_pcs_success(
+-            "resource bundle create B1 container image=pcs:test --disabled"
+-        )
++        self.assert_pcs_success("""
++            resource bundle create B1 container docker image=pcs:test
++            --disabled
++        """)
+         self.assert_pcs_success("resource show B1", outdent(
+             # pylint:disable=trailing-whitespace
+             """\
+@@ -508,7 +520,7 @@ class BundleShow(TestCase, AssertPcsMixin):
+ 
+     def test_resource(self):
+         self.assert_pcs_success(
+-            "resource bundle create B1 container image=pcs:test"
++            "resource bundle create B1 container docker image=pcs:test"
+         )
+         self.assert_pcs_success(
+             "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
+@@ -526,7 +538,8 @@ class BundleShow(TestCase, AssertPcsMixin):
+         self.assert_pcs_success(
+             """
+                 resource bundle create B1
+-                container image=pcs:test masters=2 replicas=4 options='a b c'
++                container docker image=pcs:test masters=2 replicas=4
++                    options='a b c'
+                 network host-interface=eth0 host-netmask=24 control-port=12345
+                 port-map id=B1-port-map-1001 internal-port=2002 port=2000
+                 port-map range=3000-3300
+diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
+index 2adef5a..2492ba9 100644
+--- a/pcs/test/cib_resource/test_create.py
++++ b/pcs/test/cib_resource/test_create.py
+@@ -888,7 +888,7 @@ class Bundle(ResourceTest):
+ 
+     def fixture_bundle(self, name):
+         self.assert_pcs_success(
+-            "resource bundle create {0} container image=pcs:test".format(
++            "resource bundle create {0} container docker image=pcs:test".format(
+                 name
+             )
+         )
+diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
+index 69d955d..4160b01 100644
+--- a/pcs/test/test_constraints.py
++++ b/pcs/test/test_constraints.py
+@@ -3246,7 +3246,7 @@ class Bundle(ConstraintEffect):
+ 
+     def fixture_bundle(self, name):
+         self.assert_pcs_success(
+-            "resource bundle create {0} container image=pcs:test".format(
++            "resource bundle create {0} container docker image=pcs:test".format(
+                 name
+             )
+         )
+diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
+index 4bdc194..c015fa4 100644
+--- a/pcs/test/test_resource.py
++++ b/pcs/test/test_resource.py
+@@ -4710,7 +4710,7 @@ class BundleCommon(
+ 
+     def fixture_bundle(self, name):
+         self.assert_pcs_success(
+-            "resource bundle create {0} container image=pcs:test".format(
++            "resource bundle create {0} container docker image=pcs:test".format(
+                 name
+             )
+         )
+diff --git a/pcs/usage.py b/pcs/usage.py
+index 75cb118..9cbf7de 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -428,13 +428,12 @@ Commands:
+         If 'n' is not specified it defaults to 60 minutes.
+         Note: to remove a master you must remove the resource/group it contains.
+ 
+-    bundle create <bundle id> [container [<container type>] <container options>]
++    bundle create <bundle id> container <container type> [<container options>]
+             [network <network options>] [port-map <port options>]...
+             [storage-map <storage options>]... [meta <meta options>]
+             [--disabled] [--wait[=n]]
+         Create a new bundle encapsulating no resources. The bundle can be used
+         either as it is or a resource may be put into it at any time.
+-        If the container type is not specified, it defaults to 'docker'.
+         If --disabled is specified, the bundle is not started automatically.
+         If --wait is specified, pcs will wait up to 'n' seconds for the bundle
+         to start and then return 0 on success or 1 on error. If 'n' is not
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1447910-01-bundle-resources-are-missing-meta-attributes.patch b/SOURCES/bz1447910-01-bundle-resources-are-missing-meta-attributes.patch
new file mode 100644
index 0000000..7bf8e65
--- /dev/null
+++ b/SOURCES/bz1447910-01-bundle-resources-are-missing-meta-attributes.patch
@@ -0,0 +1,2488 @@
+From db1a118ed0d36633c67513961b479f8fae3cc2b9 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Thu, 15 Jun 2017 11:46:12 +0200
+Subject: [PATCH] squash bz1447910 bundle resources are missing meta
+
+d21dd0e6b4d3 make resource enable | disable work with bundles
+
+27d46c115210 make resource manage | unmanage work with bundles
+
+c963cdcd321b show bundles' meta attributes in resources listing
+
+f1923af76d73 support meta attributes in 'resource bundle create'
+
+e09015ee868a support meta attributes in 'resource bundle update'
+
+c6e70a38346a stop bundles when deleting them
+---
+ pcs/cli/resource/parse_args.py                     |   4 +-
+ pcs/cli/resource/test/test_parse_args.py           |  70 ++++++++
+ pcs/lib/cib/nvpair.py                              |  12 +-
+ pcs/lib/cib/resource/bundle.py                     |  17 +-
+ pcs/lib/cib/resource/common.py                     |  40 +++--
+ pcs/lib/cib/test/test_nvpair.py                    |  42 +++++
+ pcs/lib/cib/test/test_resource_common.py           |  16 +-
+ pcs/lib/cib/tools.py                               |  10 +-
+ pcs/lib/commands/resource.py                       |  86 +++++++---
+ pcs/lib/commands/test/resource/fixture.py          |   2 +-
+ .../commands/test/resource/test_bundle_create.py   | 179 +++++++++++++++----
+ .../commands/test/resource/test_bundle_update.py   | 102 ++++++++++-
+ .../test/resource/test_resource_enable_disable.py  |  93 ++++++++--
+ .../test/resource/test_resource_manage_unmanage.py | 189 +++++++++++++++++++--
+ pcs/lib/pacemaker/state.py                         |  40 ++++-
+ pcs/lib/pacemaker/test/test_state.py               | 108 +++++++++++-
+ pcs/pcs.8                                          |   6 +-
+ pcs/resource.py                                    |  99 ++++++++---
+ pcs/test/cib_resource/test_bundle.py               |  67 ++++++++
+ pcs/test/cib_resource/test_manage_unmanage.py      |   5 +-
+ pcs/test/test_resource.py                          |  40 +++--
+ pcs/usage.py                                       |   9 +-
+ 22 files changed, 1055 insertions(+), 181 deletions(-)
+
+diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
+index 19ee8f9..366acac 100644
+--- a/pcs/cli/resource/parse_args.py
++++ b/pcs/cli/resource/parse_args.py
+@@ -58,7 +58,7 @@ def parse_create(arg_list):
+ 
+ def _parse_bundle_groups(arg_list):
+     repeatable_keyword_list = ["port-map", "storage-map"]
+-    keyword_list = ["container", "network"] + repeatable_keyword_list
++    keyword_list = ["meta", "container", "network"] + repeatable_keyword_list
+     groups = group_by_keywords(
+         arg_list,
+         set(keyword_list),
+@@ -99,6 +99,7 @@ def parse_bundle_create_options(arg_list):
+             prepare_options(storage_map)
+             for storage_map in groups.get("storage-map", [])
+         ],
++        "meta": prepare_options(groups.get("meta", []))
+     }
+     if not parts["container_type"]:
+         parts["container_type"] = "docker"
+@@ -144,6 +145,7 @@ def parse_bundle_update_options(arg_list):
+         "port_map_remove": port_map["remove"],
+         "storage_map_add": storage_map["add"],
+         "storage_map_remove": storage_map["remove"],
++        "meta": prepare_options(groups.get("meta", []))
+     }
+     return parts
+ 
+diff --git a/pcs/cli/resource/test/test_parse_args.py b/pcs/cli/resource/test/test_parse_args.py
+index 5033ec7..0c936cc 100644
+--- a/pcs/cli/resource/test/test_parse_args.py
++++ b/pcs/cli/resource/test/test_parse_args.py
+@@ -220,6 +220,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [],
+                 "storage_map": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -235,6 +236,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [],
+                 "storage_map": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -247,6 +249,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [],
+                 "storage_map": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -259,6 +262,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [],
+                 "storage_map": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -280,6 +284,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {"a": "b", "c": "d"},
+                 "port_map": [],
+                 "storage_map": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -309,6 +314,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [{"a": "b", "c": "d"}],
+                 "storage_map": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -321,6 +327,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [{"a": "b", "c": "d"}, {"e": "f"}],
+                 "storage_map": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -347,6 +354,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [],
+                 "storage_map": [{"a": "b", "c": "d"}],
++                "meta": {},
+             }
+         )
+ 
+@@ -359,6 +367,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {},
+                 "port_map": [],
+                 "storage_map": [{"a": "b", "c": "d"}, {"e": "f"}],
++                "meta": {},
+             }
+         )
+ 
+@@ -368,6 +377,28 @@ class ParseBundleCreateOptions(TestCase):
+     def test_storage_map_missing_key(self):
+         self.assert_raises_cmdline(["storage-map", "=b", "c=d"])
+ 
++    def test_meta(self):
++        self.assert_produce(
++            ["meta", "a=b", "c=d"],
++            {
++                "container_type": "docker",
++                "container": {},
++                "network": {},
++                "port_map": [],
++                "storage_map": [],
++                "meta": {"a": "b", "c": "d"},
++            }
++        )
++
++    def test_meta_empty(self):
++        self.assert_raises_cmdline(["meta"])
++
++    def test_meta_missing_value(self):
++        self.assert_raises_cmdline(["meta", "a", "c=d"])
++
++    def test_meta_missing_key(self):
++        self.assert_raises_cmdline(["meta", "=b", "c=d"])
++
+     def test_all(self):
+         self.assert_produce(
+             [
+@@ -377,6 +408,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "port-map", "m=n", "o=p",
+                 "storage-map", "q=r", "s=t",
+                 "storage-map", "u=v", "w=x",
++                "meta", "y=z", "A=B",
+             ],
+             {
+                 "container_type": "lxc",
+@@ -384,6 +416,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {"e": "f", "g": "h"},
+                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
+                 "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
++                "meta": {"y": "z", "A": "B"},
+             }
+         )
+ 
+@@ -391,11 +424,13 @@ class ParseBundleCreateOptions(TestCase):
+         self.assert_produce(
+             [
+                 "storage-map", "q=r", "s=t",
++                "meta", "y=z",
+                 "port-map", "i=j", "k=l",
+                 "network", "e=f",
+                 "container", "lxc", "a=b",
+                 "storage-map", "u=v", "w=x",
+                 "port-map", "m=n", "o=p",
++                "meta", "A=B",
+                 "network", "g=h",
+                 "container", "c=d",
+             ],
+@@ -405,6 +440,7 @@ class ParseBundleCreateOptions(TestCase):
+                 "network": {"e": "f", "g": "h"},
+                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
+                 "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
++                "meta": {"y": "z", "A": "B"},
+             }
+         )
+ 
+@@ -432,6 +468,7 @@ class ParseBundleUpdateOptions(TestCase):
+                 "port_map_remove": [],
+                 "storage_map_add": [],
+                 "storage_map_remove": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -445,6 +482,7 @@ class ParseBundleUpdateOptions(TestCase):
+                 "port_map_remove": [],
+                 "storage_map_add": [],
+                 "storage_map_remove": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -467,6 +505,7 @@ class ParseBundleUpdateOptions(TestCase):
+                 "port_map_remove": [],
+                 "storage_map_add": [],
+                 "storage_map_remove": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -519,6 +558,7 @@ class ParseBundleUpdateOptions(TestCase):
+                 "port_map_remove": ["c", "d", "i"],
+                 "storage_map_add": [],
+                 "storage_map_remove": [],
++                "meta": {},
+             }
+         )
+ 
+@@ -562,9 +602,34 @@ class ParseBundleUpdateOptions(TestCase):
+                     {"e": "f", "g": "h",},
+                 ],
+                 "storage_map_remove": ["c", "d", "i"],
++                "meta": {},
++            }
++        )
++
++    def test_meta(self):
++        self.assert_produce(
++            ["meta", "a=b", "c=d"],
++            {
++                "container": {},
++                "network": {},
++                "port_map_add": [],
++                "port_map_remove": [],
++                "storage_map_add": [],
++                "storage_map_remove": [],
++                "meta": {"a": "b", "c": "d"},
+             }
+         )
+ 
++    def test_meta_empty(self):
++        self.assert_raises_cmdline(["meta"])
++
++    def test_meta_missing_value(self):
++        self.assert_raises_cmdline(["meta", "a", "c=d"])
++
++    def test_meta_missing_key(self):
++        self.assert_raises_cmdline(["meta", "=b", "c=d"])
++
++
+     def test_all(self):
+         self.assert_produce(
+             [
+@@ -578,6 +643,7 @@ class ParseBundleUpdateOptions(TestCase):
+                 "storage-map", "add", "v=w",
+                 "storage-map", "remove", "x", "y",
+                 "storage-map", "remove", "z",
++                "meta", "A=B", "C=D",
+             ],
+             {
+                 "container": {"a": "b", "c": "d"},
+@@ -592,6 +658,7 @@ class ParseBundleUpdateOptions(TestCase):
+                     {"v": "w"},
+                 ],
+                 "storage_map_remove": ["x", "y", "z"],
++                "meta": {"A": "B", "C": "D"},
+             }
+         )
+ 
+@@ -599,11 +666,13 @@ class ParseBundleUpdateOptions(TestCase):
+         self.assert_produce(
+             [
+                 "storage-map", "remove", "x", "y",
++                "meta", "A=B",
+                 "port-map", "remove", "o", "p",
+                 "network", "e=f", "g=h",
+                 "storage-map", "add", "r=s", "t=u",
+                 "port-map", "add", "i=j", "k=l",
+                 "container", "a=b", "c=d",
++                "meta", "C=D",
+                 "port-map", "remove", "q",
+                 "storage-map", "remove", "z",
+                 "storage-map", "add", "v=w",
+@@ -622,6 +691,7 @@ class ParseBundleUpdateOptions(TestCase):
+                     {"v": "w"},
+                 ],
+                 "storage_map_remove": ["x", "y", "z"],
++                "meta": {"A": "B", "C": "D"},
+             }
+         )
+ 
+diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
+index 261d17c..d3f5a5c 100644
+--- a/pcs/lib/cib/nvpair.py
++++ b/pcs/lib/cib/nvpair.py
+@@ -11,18 +11,19 @@ from functools import partial
+ from pcs.lib.cib.tools import create_subelement_id
+ from pcs.lib.xml_tools import get_sub_element
+ 
+-def _append_new_nvpair(nvset_element, name, value):
++def _append_new_nvpair(nvset_element, name, value, id_provider=None):
+     """
+     Create nvpair with name and value as subelement of nvset_element.
+ 
+     etree.Element nvset_element is context of new nvpair
+     string name is name attribute of new nvpair
+     string value is value attribute of new nvpair
++    IdProvider id_provider -- elements' ids generator
+     """
+     etree.SubElement(
+         nvset_element,
+         "nvpair",
+-        id=create_subelement_id(nvset_element, name),
++        id=create_subelement_id(nvset_element, name, id_provider),
+         name=name,
+         value=value
+     )
+@@ -73,7 +74,7 @@ def arrange_first_nvset(tag_name, context_element, nvpair_dict):
+ 
+     update_nvset(nvset_element, nvpair_dict)
+ 
+-def append_new_nvset(tag_name, context_element, nvpair_dict):
++def append_new_nvset(tag_name, context_element, nvpair_dict, id_provider=None):
+     """
+     Append new nvset_element comprising nvpairs children (corresponding
+     nvpair_dict) to the context_element
+@@ -81,12 +82,13 @@ def append_new_nvset(tag_name, context_element, nvpair_dict):
+     string tag_name should be "instance_attributes" or "meta_attributes"
+     etree.Element context_element is element where new nvset will be appended
+     dict nvpair_dict contains source for nvpair children
++    IdProvider id_provider -- elements' ids generator
+     """
+     nvset_element = etree.SubElement(context_element, tag_name, {
+-        "id": create_subelement_id(context_element, tag_name)
++        "id": create_subelement_id(context_element, tag_name, id_provider)
+     })
+     for name, value in sorted(nvpair_dict.items()):
+-        _append_new_nvpair(nvset_element, name, value)
++        _append_new_nvpair(nvset_element, name, value, id_provider)
+ 
+ append_new_instance_attributes = partial(
+     append_new_nvset,
+diff --git a/pcs/lib/cib/resource/bundle.py b/pcs/lib/cib/resource/bundle.py
+index 0fe16f3..8a49c28 100644
+--- a/pcs/lib/cib/resource/bundle.py
++++ b/pcs/lib/cib/resource/bundle.py
+@@ -9,6 +9,10 @@ from lxml import etree
+ 
+ from pcs.common import report_codes
+ from pcs.lib import reports, validate
++from pcs.lib.cib.nvpair import (
++    append_new_meta_attributes,
++    arrange_first_meta_attributes,
++)
+ from pcs.lib.cib.resource.primitive import TAG as TAG_PRIMITIVE
+ from pcs.lib.cib.tools import find_element_by_tag_and_id
+ from pcs.lib.errors import (
+@@ -96,7 +100,7 @@ def validate_new(
+ 
+ def append_new(
+     parent_element, id_provider, bundle_id, container_type, container_options,
+-    network_options, port_map, storage_map
++    network_options, port_map, storage_map, meta_attributes
+ ):
+     """
+     Create new bundle and add it to the CIB
+@@ -109,6 +113,7 @@ def append_new(
+     dict network_options -- network options
+     list of dict port_map -- list of port mapping options
+     list of dict storage_map -- list of storage mapping options
++    dict meta_attributes -- meta attributes
+     """
+     bundle_element = etree.SubElement(parent_element, TAG, {"id": bundle_id})
+     # TODO create the proper element once more container_types are supported
+@@ -132,6 +137,8 @@ def append_new(
+         _append_storage_map(
+             storage_element, id_provider, bundle_id, storage_map_options
+         )
++    if meta_attributes:
++        append_new_meta_attributes(bundle_element, meta_attributes, id_provider)
+     return bundle_element
+ 
+ def validate_update(
+@@ -203,7 +210,8 @@ def validate_update(
+ 
+ def update(
+     id_provider, bundle_el, container_options, network_options,
+-    port_map_add, port_map_remove, storage_map_add, storage_map_remove
++    port_map_add, port_map_remove, storage_map_add, storage_map_remove,
++    meta_attributes
+ ):
+     """
+     Modify an existing bundle (does not touch encapsulated resources)
+@@ -216,6 +224,7 @@ def update(
+     list of string port_map_remove -- list of port mapping ids to remove
+     list of dict storage_map_add -- list of storage mapping options to add
+     list of string storage_map_remove -- list of storage mapping ids to remove
++    dict meta_attributes -- meta attributes to update
+     """
+     bundle_id = bundle_el.get("id")
+     update_attributes_remove_empty(
+@@ -253,7 +262,11 @@ def update(
+             storage_element, id_provider, bundle_id, storage_map_options
+         )
+ 
++    if meta_attributes:
++        arrange_first_meta_attributes(bundle_el, meta_attributes)
++
+     # remove empty elements with no attributes
++    # meta attributes are handled in their own function
+     for element in (network_element, storage_element):
+         if len(element) < 1 and not element.attrib:
+             element.getparent().remove(element)
+diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py
+index f9028ff..0e52b4c 100644
+--- a/pcs/lib/cib/resource/common.py
++++ b/pcs/lib/cib/resource/common.py
+@@ -58,16 +58,18 @@ def find_resources_to_enable(resource_el):
+     etree resource_el -- resource element
+     """
+     if is_bundle(resource_el):
+-        # bundles currently cannot be disabled - pcmk does not support that
+-        # inner resources are supposed to be managed separately
+-        return []
++        to_enable = [resource_el]
++        in_bundle = get_bundle_inner_resource(resource_el)
++        if in_bundle is not None:
++            to_enable.append(in_bundle)
++        return to_enable
+ 
+     if is_any_clone(resource_el):
+         return [resource_el, get_clone_inner_resource(resource_el)]
+ 
+     to_enable = [resource_el]
+     parent = resource_el.getparent()
+-    if is_any_clone(parent):
++    if is_any_clone(parent) or is_bundle(parent):
+         to_enable.append(parent)
+     return to_enable
+ 
+@@ -109,20 +111,25 @@ def find_resources_to_manage(resource_el):
+     # put there manually. If we didn't do it, the resource may stay unmanaged,
+     # as a managed primitive in an unmanaged clone / group is still unmanaged
+     # and vice versa.
+-    # Bundle resources cannot be set as unmanaged - pcmk currently doesn't
+-    # support that. Resources in a bundle are supposed to be treated separately.
+-    if is_bundle(resource_el):
+-        return []
+     res_id = resource_el.attrib["id"]
+     return (
+         [resource_el] # the resource itself
+         +
+         # its parents
+         find_parent(resource_el, "resources").xpath(
++            # a master or a clone which contains a group, a primitve, or a
++            # grouped primitive with the specified id
++            # OR
++            # a group (in a clone, master, etc. - hence //) which contains a
++            # primitive with the specified id
++            # OR
++            # a bundle which contains a primitive with the specified id
+             """
+                 (./master|./clone)[(group|group/primitive|primitive)[@id='{r}']]
+                 |
+                 //group[primitive[@id='{r}']]
++                |
++                ./bundle[primitive[@id='{r}']]
+             """
+             .format(r=res_id)
+         )
+@@ -164,10 +171,19 @@ def find_resources_to_unmanage(resource_el):
+     #   See clone notes above
+     #
+     # a bundled primitive - the primitive - the primitive
+-    # a bundled primitive - the bundle - nothing
+-    #  bundles currently cannot be set as unmanaged - pcmk does not support that
+-    # an empty bundle - the bundle - nothing
+-    #  bundles currently cannot be set as unmanaged - pcmk does not support that
++    # a bundled primitive - the bundle - the bundle and the primitive
++    #  We need to unmanage implicit resources create by pacemaker and there is
++    #  no other way to do it than unmanage the bundle itself.
++    #  Since it is not possible to unbundle a resource, the concers described
++    #  at unclone don't apply here. However to prevent future bugs, in case
++    #  unbundling becomes possible, we unmanage the primitive as well.
++    # an empty bundle - the bundle - the bundle
++    #  There is nothing else to unmanage.
++    if is_bundle(resource_el):
++        in_bundle = get_bundle_inner_resource(resource_el)
++        return (
++            [resource_el, in_bundle] if in_bundle is not None else [resource_el]
++        )
+     if is_any_clone(resource_el):
+         resource_el = get_clone_inner_resource(resource_el)
+     if is_group(resource_el):
+diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
+index 9b9d9b9..0f6d8f8 100644
+--- a/pcs/lib/cib/test/test_nvpair.py
++++ b/pcs/lib/cib/test/test_nvpair.py
+@@ -8,6 +8,7 @@ from __future__ import (
+ from lxml import etree
+ 
+ from pcs.lib.cib import nvpair
++from pcs.lib.cib.tools import IdProvider
+ from pcs.test.tools.assertions import assert_xml_equal
+ from pcs.test.tools.pcs_unittest import TestCase, mock
+ from pcs.test.tools.xml import etree_to_str
+@@ -25,6 +26,21 @@ class AppendNewNvpair(TestCase):
+             """
+         )
+ 
++    def test_with_id_provider(self):
++        nvset_element = etree.fromstring('<nvset id="a"/>')
++        provider = IdProvider(nvset_element)
++        provider.book_ids("a-b")
++        nvpair._append_new_nvpair(nvset_element, "b", "c", provider)
++        assert_xml_equal(
++            etree_to_str(nvset_element),
++            """
++            <nvset id="a">
++                <nvpair id="a-b-1" name="b" value="c"></nvpair>
++            </nvset>
++            """
++        )
++
++
+ class UpdateNvsetTest(TestCase):
+     @mock.patch(
+         "pcs.lib.cib.nvpair.create_subelement_id",
+@@ -167,6 +183,32 @@ class AppendNewNvsetTest(TestCase):
+             etree_to_str(context_element)
+         )
+ 
++    def test_with_id_provider(self):
++        context_element = etree.fromstring('<context id="a"/>')
++        provider = IdProvider(context_element)
++        provider.book_ids("a-instance_attributes", "a-instance_attributes-1-a")
++        nvpair.append_new_nvset(
++            "instance_attributes",
++            context_element,
++            {
++                "a": "b",
++                "c": "d",
++            },
++            provider
++        )
++        assert_xml_equal(
++            """
++                <context id="a">
++                    <instance_attributes id="a-instance_attributes-1">
++                        <nvpair id="a-instance_attributes-1-a-1" name="a" value="b"/>
++                        <nvpair id="a-instance_attributes-1-c" name="c" value="d"/>
++                    </instance_attributes>
++                </context>
++            """,
++            etree_to_str(context_element)
++        )
++
++
+ class ArrangeFirstNvsetTest(TestCase):
+     def setUp(self):
+         self.root = etree.Element("root", id="root")
+diff --git a/pcs/lib/cib/test/test_resource_common.py b/pcs/lib/cib/test/test_resource_common.py
+index 52c2329..6b485f7 100644
+--- a/pcs/lib/cib/test/test_resource_common.py
++++ b/pcs/lib/cib/test/test_resource_common.py
+@@ -180,7 +180,7 @@ class FindResourcesToEnable(TestCase):
+         self.assert_find_resources("F2", ["F2"])
+ 
+     def test_primitive_in_bundle(self):
+-        self.assert_find_resources("H", ["H"])
++        self.assert_find_resources("H", ["H", "H-bundle"])
+ 
+     def test_group(self):
+         self.assert_find_resources("D", ["D"])
+@@ -204,10 +204,10 @@ class FindResourcesToEnable(TestCase):
+         self.assert_find_resources("F-master", ["F-master", "F"])
+ 
+     def test_bundle_empty(self):
+-        self.assert_find_resources("G-bundle", [])
++        self.assert_find_resources("G-bundle", ["G-bundle"])
+ 
+     def test_bundle_with_primitive(self):
+-        self.assert_find_resources("H-bundle", [])
++        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
+ 
+ 
+ class Enable(TestCase):
+@@ -360,7 +360,7 @@ class FindResourcesToManage(TestCase):
+         self.assert_find_resources("F2", ["F2", "F-master", "F"])
+ 
+     def test_primitive_in_bundle(self):
+-        self.assert_find_resources("H", ["H"])
++        self.assert_find_resources("H", ["H", "H-bundle"])
+ 
+     def test_group(self):
+         self.assert_find_resources("D", ["D", "D1", "D2"])
+@@ -384,10 +384,10 @@ class FindResourcesToManage(TestCase):
+         self.assert_find_resources("F-master", ["F-master", "F", "F1", "F2"])
+ 
+     def test_bundle_empty(self):
+-        self.assert_find_resources("G-bundle", [])
++        self.assert_find_resources("G-bundle", ["G-bundle"])
+ 
+     def test_bundle_with_primitive(self):
+-        self.assert_find_resources("H-bundle", [])
++        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
+ 
+ 
+ class FindResourcesToUnmanage(TestCase):
+@@ -447,10 +447,10 @@ class FindResourcesToUnmanage(TestCase):
+         self.assert_find_resources("F-master", ["F1", "F2"])
+ 
+     def test_bundle_empty(self):
+-        self.assert_find_resources("G-bundle", [])
++        self.assert_find_resources("G-bundle", ["G-bundle"])
+ 
+     def test_bundle_with_primitive(self):
+-        self.assert_find_resources("H-bundle", [])
++        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
+ 
+ 
+ class Manage(TestCase):
+diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
+index 2308a42..cf91125 100644
+--- a/pcs/lib/cib/tools.py
++++ b/pcs/lib/cib/tools.py
+@@ -177,11 +177,11 @@ def find_element_by_tag_and_id(
+         )
+     )
+ 
+-def create_subelement_id(context_element, suffix):
+-    return find_unique_id(
+-        context_element,
+-        "{0}-{1}".format(context_element.get("id"), suffix)
+-    )
++def create_subelement_id(context_element, suffix, id_provider=None):
++    proposed_id = "{0}-{1}".format(context_element.get("id"), suffix)
++    if id_provider:
++        return id_provider.allocate_id(proposed_id)
++    return find_unique_id(context_element, proposed_id)
+ 
+ def check_new_id_applicable(tree, description, id):
+     validate_id(id, description)
+diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
+index 3a060b8..0c5f682 100644
+--- a/pcs/lib/commands/resource.py
++++ b/pcs/lib/commands/resource.py
+@@ -22,6 +22,7 @@ from pcs.lib.errors import LibraryError
+ from pcs.lib.pacemaker.values import validate_id
+ from pcs.lib.pacemaker.state import (
+     ensure_resource_state,
++    info_resource_state,
+     is_resource_managed,
+     ResourceNotFound,
+ )
+@@ -31,7 +32,10 @@ from pcs.lib.resource_agent import(
+ 
+ @contextmanager
+ def resource_environment(
+-    env, wait=False, wait_for_resource_ids=None, disabled_after_wait=False,
++    env,
++    wait=False,
++    wait_for_resource_ids=None,
++    resource_state_reporter=info_resource_state,
+     required_cib_version=None
+ ):
+     env.ensure_wait_satisfiable(wait)
+@@ -41,10 +45,19 @@ def resource_environment(
+     if wait is not False and wait_for_resource_ids:
+         state = env.get_cluster_state()
+         env.report_processor.process_list([
+-            ensure_resource_state(not disabled_after_wait, state, res_id)
++            resource_state_reporter(state, res_id)
+             for res_id in wait_for_resource_ids
+         ])
+ 
++def _ensure_disabled_after_wait(disabled_after_wait):
++    def inner(state, resource_id):
++        return ensure_resource_state(
++            not disabled_after_wait,
++            state,
++            resource_id
++        )
++    return inner
++
+ def _validate_remote_connection(
+     resource_agent, nodes_to_validate_against, resource_id, instance_attributes,
+     allow_not_suitable_command
+@@ -195,7 +208,11 @@ def create(
+         env,
+         wait,
+         [resource_id],
+-        ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
++        _ensure_disabled_after_wait(
++            ensure_disabled
++            or
++            resource.common.are_meta_disabled(meta_attributes)
++        )
+     ) as resources_section:
+         _check_special_cases(
+             env,
+@@ -269,7 +286,7 @@ def _create_as_clone_common(
+         env,
+         wait,
+         [resource_id],
+-        (
++        _ensure_disabled_after_wait(
+             ensure_disabled
+             or
+             resource.common.are_meta_disabled(meta_attributes)
+@@ -353,7 +370,11 @@ def create_in_group(
+         env,
+         wait,
+         [resource_id],
+-        ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
++        _ensure_disabled_after_wait(
++            ensure_disabled
++            or
++            resource.common.are_meta_disabled(meta_attributes)
++        )
+     ) as resources_section:
+         _check_special_cases(
+             env,
+@@ -433,7 +454,11 @@ def create_into_bundle(
+         env,
+         wait,
+         [resource_id],
+-        disabled_after_wait=ensure_disabled,
++        _ensure_disabled_after_wait(
++            ensure_disabled
++            or
++            resource.common.are_meta_disabled(meta_attributes)
++        ),
+         required_cib_version=(2, 8, 0)
+     ) as resources_section:
+         _check_special_cases(
+@@ -465,8 +490,9 @@ def create_into_bundle(
+ 
+ def bundle_create(
+     env, bundle_id, container_type, container_options=None,
+-    network_options=None, port_map=None, storage_map=None,
++    network_options=None, port_map=None, storage_map=None, meta_attributes=None,
+     force_options=False,
++    ensure_disabled=False,
+     wait=False,
+ ):
+     """
+@@ -477,24 +503,32 @@ def bundle_create(
+     string container_type -- container engine name (docker, lxc...)
+     dict container_options -- container options
+     dict network_options -- network options
+-    list of dict port_map -- list of port mapping options
+-    list of dict storage_map -- list of storage mapping options
++    list of dict port_map -- a list of port mapping options
++    list of dict storage_map -- a list of storage mapping options
++    dict meta_attributes -- bundle's meta attributes
+     bool force_options -- return warnings instead of forceable errors
++    bool ensure_disabled -- set the bundle's target-role to "Stopped"
+     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+     """
+     container_options = container_options or {}
+     network_options = network_options or {}
+     port_map = port_map or []
+     storage_map = storage_map or []
++    meta_attributes = meta_attributes or {}
+ 
+     with resource_environment(
+         env,
+         wait,
+         [bundle_id],
+-        # bundles are always enabled, currently there is no way to disable them
+-        disabled_after_wait=False,
++        _ensure_disabled_after_wait(
++            ensure_disabled
++            or
++            resource.common.are_meta_disabled(meta_attributes)
++        ),
+         required_cib_version=(2, 8, 0)
+     ) as resources_section:
++        # no need to run validations related to remote and guest nodes as those
++        # nodes can only be created from primitive resources
+         id_provider = IdProvider(resources_section)
+         env.report_processor.process_list(
+             resource.bundle.validate_new(
+@@ -505,10 +539,11 @@ def bundle_create(
+                 network_options,
+                 port_map,
+                 storage_map,
++                # TODO meta attributes - there is no validation for now
+                 force_options
+             )
+         )
+-        resource.bundle.append_new(
++        bundle_element = resource.bundle.append_new(
+             resources_section,
+             id_provider,
+             bundle_id,
+@@ -516,13 +551,16 @@ def bundle_create(
+             container_options,
+             network_options,
+             port_map,
+-            storage_map
++            storage_map,
++            meta_attributes
+         )
++        if ensure_disabled:
++            resource.common.disable(bundle_element)
+ 
+ def bundle_update(
+     env, bundle_id, container_options=None, network_options=None,
+     port_map_add=None, port_map_remove=None, storage_map_add=None,
+-    storage_map_remove=None,
++    storage_map_remove=None, meta_attributes=None,
+     force_options=False,
+     wait=False,
+ ):
+@@ -537,6 +575,7 @@ def bundle_update(
+     list of string port_map_remove -- list of port mapping ids to remove
+     list of dict storage_map_add -- list of storage mapping options to add
+     list of string storage_map_remove -- list of storage mapping ids to remove
++    dict meta_attributes -- meta attributes to update
+     bool force_options -- return warnings instead of forceable errors
+     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+     """
+@@ -546,15 +585,16 @@ def bundle_update(
+     port_map_remove = port_map_remove or []
+     storage_map_add = storage_map_add or []
+     storage_map_remove = storage_map_remove or []
++    meta_attributes = meta_attributes or {}
+ 
+     with resource_environment(
+         env,
+         wait,
+         [bundle_id],
+-        # bundles are always enabled, currently there is no way to disable them
+-        disabled_after_wait=False,
+         required_cib_version=(2, 8, 0)
+     ) as resources_section:
++        # no need to run validations related to remote and guest nodes as those
++        # nodes can only be created from primitive resources
+         id_provider = IdProvider(resources_section)
+         bundle_element = find_element_by_tag_and_id(
+             resource.bundle.TAG,
+@@ -571,6 +611,7 @@ def bundle_update(
+                 port_map_remove,
+                 storage_map_add,
+                 storage_map_remove,
++                # TODO meta attributes - there is no validation for now
+                 force_options
+             )
+         )
+@@ -582,7 +623,8 @@ def bundle_update(
+             port_map_add,
+             port_map_remove,
+             storage_map_add,
+-            storage_map_remove
++            storage_map_remove,
++            meta_attributes
+         )
+ 
+ def disable(env, resource_ids, wait):
+@@ -593,7 +635,7 @@ def disable(env, resource_ids, wait):
+     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+     """
+     with resource_environment(
+-        env, wait, resource_ids, True
++        env, wait, resource_ids, _ensure_disabled_after_wait(True)
+     ) as resources_section:
+         resource_el_list = _find_resources_or_raise(
+             resources_section,
+@@ -615,7 +657,7 @@ def enable(env, resource_ids, wait):
+     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
+     """
+     with resource_environment(
+-        env, wait, resource_ids, False
++        env, wait, resource_ids, _ensure_disabled_after_wait(False)
+     ) as resources_section:
+         resource_el_list = _find_resources_or_raise(
+             resources_section,
+@@ -642,7 +684,7 @@ def _resource_list_enable_disable(resource_el_list, func, cluster_state):
+             report_list.append(
+                 reports.id_not_found(
+                     res_id,
+-                    id_description="resource/clone/master/group"
++                    id_description="resource/clone/master/group/bundle"
+                )
+             )
+     return report_list
+@@ -735,7 +777,7 @@ def _find_resources_or_raise(
+     resource_tags = (
+         resource.clone.ALL_TAGS
+         +
+-        [resource.group.TAG, resource.primitive.TAG]
++        [resource.group.TAG, resource.primitive.TAG, resource.bundle.TAG]
+     )
+     for res_id in resource_ids:
+         try:
+@@ -745,7 +787,7 @@ def _find_resources_or_raise(
+                         resource_tags,
+                         resources_section,
+                         res_id,
+-                        id_description="resource/clone/master/group"
++                        id_description="resource/clone/master/group/bundle"
+                     )
+                 )
+             )
+diff --git a/pcs/lib/commands/test/resource/fixture.py b/pcs/lib/commands/test/resource/fixture.py
+index f1fe09b..8d96dc9 100644
+--- a/pcs/lib/commands/test/resource/fixture.py
++++ b/pcs/lib/commands/test/resource/fixture.py
+@@ -145,7 +145,7 @@ def report_not_found(res_id, context_type=""):
+             "context_type": context_type,
+             "context_id": "",
+             "id": res_id,
+-            "id_description": "resource/clone/master/group",
++            "id_description": "resource/clone/master/group/bundle",
+         },
+         None
+     )
+diff --git a/pcs/lib/commands/test/resource/test_bundle_create.py b/pcs/lib/commands/test/resource/test_bundle_create.py
+index b9922d8..3bdeee9 100644
+--- a/pcs/lib/commands/test/resource/test_bundle_create.py
++++ b/pcs/lib/commands/test/resource/test_bundle_create.py
+@@ -40,7 +40,7 @@ class MinimalCreate(CommonTest):
+             self.fixture_cib_pre,
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {"image": "pcs:test", }
++                container_options={"image": "pcs:test", }
+             ),
+             self.fixture_resources_bundle_simple
+         )
+@@ -90,7 +90,7 @@ class MinimalCreate(CommonTest):
+ 
+         resource.bundle_create(
+             self.env, "B1", "docker",
+-            {"image": "pcs:test", }
++            container_options={"image": "pcs:test", }
+         )
+ 
+         self.env.report_processor.assert_reports([
+@@ -122,7 +122,7 @@ class CreateDocker(CommonTest):
+             self.fixture_cib_pre,
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {"image": "pcs:test", }
++                container_options={"image": "pcs:test", }
+             ),
+             self.fixture_resources_bundle_simple
+         )
+@@ -132,7 +132,7 @@ class CreateDocker(CommonTest):
+             self.fixture_cib_pre,
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {
++                container_options={
+                     "image": "pcs:test",
+                     "masters": "0",
+                     "network": "extra network settings",
+@@ -168,7 +168,7 @@ class CreateDocker(CommonTest):
+         assert_raise_library_error(
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {
++                container_options={
+                     "replicas-per-host": "0",
+                     "replicas": "0",
+                     "masters": "-1",
+@@ -226,7 +226,7 @@ class CreateDocker(CommonTest):
+         assert_raise_library_error(
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {
++                container_options={
+                     "image": "",
+                 },
+                 force_options=True
+@@ -253,7 +253,7 @@ class CreateDocker(CommonTest):
+         assert_raise_library_error(
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {
++                container_options={
+                     "image": "pcs:test",
+                     "extra": "option",
+                 }
+@@ -276,7 +276,7 @@ class CreateDocker(CommonTest):
+             self.fixture_cib_pre,
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {
++                container_options={
+                     "image": "pcs:test",
+                     "extra": "option",
+                 },
+@@ -932,13 +932,61 @@ class CreateWithStorageMap(CommonTest):
+         )
+ 
+ 
++class CreateWithMeta(CommonTest):
++    def test_success(self):
++        self.assert_command_effect(
++            self.fixture_cib_pre,
++            lambda: resource.bundle_create(
++                self.env, "B1", "docker",
++                container_options={"image": "pcs:test", },
++                meta_attributes={
++                    "target-role": "Stopped",
++                    "is-managed": "false",
++                }
++            ),
++            """
++                <resources>
++                    <bundle id="B1">
++                        <docker image="pcs:test" />
++                        <meta_attributes id="B1-meta_attributes">
++                            <nvpair id="B1-meta_attributes-is-managed"
++                                name="is-managed" value="false" />
++                            <nvpair id="B1-meta_attributes-target-role"
++                                name="target-role" value="Stopped" />
++                        </meta_attributes>
++                    </bundle>
++                </resources>
++            """
++        )
++
++    def test_disabled(self):
++        self.assert_command_effect(
++            self.fixture_cib_pre,
++            lambda: resource.bundle_create(
++                self.env, "B1", "docker",
++                container_options={"image": "pcs:test", },
++                ensure_disabled=True
++            ),
++            """
++                <resources>
++                    <bundle id="B1">
++                        <meta_attributes id="B1-meta_attributes">
++                            <nvpair id="B1-meta_attributes-target-role"
++                                name="target-role" value="Stopped" />
++                        </meta_attributes>
++                        <docker image="pcs:test" />
++                    </bundle>
++                </resources>
++            """
++        )
++
+ class CreateWithAllOptions(CommonTest):
+     def test_success(self):
+         self.assert_command_effect(
+             self.fixture_cib_pre,
+             lambda: resource.bundle_create(
+                 self.env, "B1", "docker",
+-                {
++                container_options={
+                     "image": "pcs:test",
+                     "masters": "0",
+                     "network": "extra network settings",
+@@ -947,13 +995,13 @@ class CreateWithAllOptions(CommonTest):
+                     "replicas": "4",
+                     "replicas-per-host": "2",
+                 },
+-                {
++                network_options={
+                     "control-port": "12345",
+                     "host-interface": "eth0",
+                     "host-netmask": "24",
+                     "ip-range-start": "192.168.100.200",
+                 },
+-                [
++                port_map=[
+                     {
+                         "port": "1001",
+                     },
+@@ -967,7 +1015,7 @@ class CreateWithAllOptions(CommonTest):
+                         "range": "3000-3300",
+                     },
+                 ],
+-                [
++                storage_map=[
+                     {
+                         "source-dir": "/tmp/docker1a",
+                         "target-dir": "/tmp/docker1b",
+@@ -1082,21 +1130,26 @@ class Wait(CommonTest):
+         </resources>
+     """
+ 
+-    timeout = 10
++    fixture_resources_bundle_simple_disabled = """
++        <resources>
++            <bundle id="B1">
++                <meta_attributes id="B1-meta_attributes">
++                    <nvpair id="B1-meta_attributes-target-role"
++                        name="target-role" value="Stopped" />
++                </meta_attributes>
++                <docker image="pcs:test" />
++            </bundle>
++        </resources>
++    """
+ 
+-    def fixture_calls_initial(self):
+-        return (
+-            fixture.call_wait_supported() +
+-            fixture.calls_cib(
+-                self.fixture_cib_pre,
+-                self.fixture_resources_bundle_simple,
+-                cib_base_file=self.cib_base_file,
+-            )
+-        )
++    timeout = 10
+ 
+-    def simple_bundle_create(self, wait=False):
++    def simple_bundle_create(self, wait=False, disabled=False):
+         return resource.bundle_create(
+-            self.env, "B1", "docker", {"image": "pcs:test"}, wait=wait,
++            self.env, "B1", "docker",
++            container_options={"image": "pcs:test"},
++            ensure_disabled=disabled,
++            wait=wait,
+         )
+ 
+     def test_wait_fail(self):
+@@ -1108,7 +1161,14 @@ class Wait(CommonTest):
+             """
+         )
+         self.runner.set_runs(
+-            self.fixture_calls_initial() +
++            fixture.call_wait_supported()
++            +
++            fixture.calls_cib(
++                self.fixture_cib_pre,
++                self.fixture_resources_bundle_simple,
++                cib_base_file=self.cib_base_file,
++            )
++            +
+             fixture.call_wait(self.timeout, 62, fixture_wait_timeout_error)
+         )
+         assert_raise_library_error(
+@@ -1122,8 +1182,16 @@ class Wait(CommonTest):
+     @skip_unless_pacemaker_supports_bundle
+     def test_wait_ok_run_ok(self):
+         self.runner.set_runs(
+-            self.fixture_calls_initial() +
+-            fixture.call_wait(self.timeout) +
++            fixture.call_wait_supported()
++            +
++            fixture.calls_cib(
++                self.fixture_cib_pre,
++                self.fixture_resources_bundle_simple,
++                cib_base_file=self.cib_base_file,
++            )
++            +
++            fixture.call_wait(self.timeout)
++            +
+             fixture.call_status(fixture.state_complete(
+                 self.fixture_status_running
+             ))
+@@ -1139,8 +1207,16 @@ class Wait(CommonTest):
+     @skip_unless_pacemaker_supports_bundle
+     def test_wait_ok_run_fail(self):
+         self.runner.set_runs(
+-            self.fixture_calls_initial() +
+-            fixture.call_wait(self.timeout) +
++            fixture.call_wait_supported()
++            +
++            fixture.calls_cib(
++                self.fixture_cib_pre,
++                self.fixture_resources_bundle_simple,
++                cib_base_file=self.cib_base_file,
++            )
++            +
++            fixture.call_wait(self.timeout)
++            +
+             fixture.call_status(fixture.state_complete(
+                 self.fixture_status_not_running
+             ))
+@@ -1150,3 +1226,48 @@ class Wait(CommonTest):
+             fixture.report_resource_not_running("B1", severities.ERROR),
+         )
+         self.runner.assert_everything_launched()
++
++    @skip_unless_pacemaker_supports_bundle
++    def test_disabled_wait_ok_run_ok(self):
++        self.runner.set_runs(
++            fixture.call_wait_supported()
++            +
++            fixture.calls_cib(
++                self.fixture_cib_pre,
++                self.fixture_resources_bundle_simple_disabled,
++                cib_base_file=self.cib_base_file,
++            )
++            +
++            fixture.call_wait(self.timeout)
++            +
++            fixture.call_status(fixture.state_complete(
++                self.fixture_status_not_running
++            ))
++        )
++        self.simple_bundle_create(self.timeout, disabled=True)
++        self.runner.assert_everything_launched()
++
++    @skip_unless_pacemaker_supports_bundle
++    def test_disabled_wait_ok_run_fail(self):
++        self.runner.set_runs(
++            fixture.call_wait_supported()
++            +
++            fixture.calls_cib(
++                self.fixture_cib_pre,
++                self.fixture_resources_bundle_simple_disabled,
++                cib_base_file=self.cib_base_file,
++            )
++            +
++            fixture.call_wait(self.timeout)
++            +
++            fixture.call_status(fixture.state_complete(
++                self.fixture_status_running
++            ))
++        )
++        assert_raise_library_error(
++            lambda: self.simple_bundle_create(self.timeout, disabled=True),
++            fixture.report_resource_running(
++                "B1", {"Started": ["node1", "node2"]}, severities.ERROR
++            )
++        )
++        self.runner.assert_everything_launched()
+diff --git a/pcs/lib/commands/test/resource/test_bundle_update.py b/pcs/lib/commands/test/resource/test_bundle_update.py
+index 55cfa7b..7a1ee49 100644
+--- a/pcs/lib/commands/test/resource/test_bundle_update.py
++++ b/pcs/lib/commands/test/resource/test_bundle_update.py
+@@ -709,6 +709,96 @@ class StorageMap(CommonTest):
+         self.runner.assert_everything_launched()
+ 
+ 
++class Meta(CommonTest):
++    fixture_no_meta = """
++        <resources>
++            <bundle id="B1">
++                <docker image="pcs:test" masters="3" replicas="6"/>
++            </bundle>
++        </resources>
++    """
++
++    fixture_meta_stopped = """
++        <resources>
++            <bundle id="B1">
++                <meta_attributes id="B1-meta_attributes">
++                <nvpair id="B1-meta_attributes-target-role"
++                    name="target-role" value="Stopped" />
++                </meta_attributes>
++                <docker image="pcs:test" masters="3" replicas="6"/>
++            </bundle>
++        </resources>
++    """
++
++    def test_add_meta_element(self):
++        self.assert_command_effect(
++            self.fixture_no_meta,
++            lambda: resource.bundle_update(
++                self.env, "B1",
++                meta_attributes={
++                    "target-role": "Stopped",
++                }
++            ),
++            self.fixture_meta_stopped
++        )
++
++    def test_remove_meta_element(self):
++        self.assert_command_effect(
++            self.fixture_meta_stopped,
++            lambda: resource.bundle_update(
++                self.env, "B1",
++                meta_attributes={
++                    "target-role": "",
++                }
++            ),
++            self.fixture_no_meta
++        )
++
++    def test_change_meta(self):
++        fixture_cib_pre = """
++            <resources>
++                <bundle id="B1">
++                    <meta_attributes id="B1-meta_attributes">
++                    <nvpair id="B1-meta_attributes-target-role"
++                        name="target-role" value="Stopped" />
++                    <nvpair id="B1-meta_attributes-priority"
++                        name="priority" value="15" />
++                    <nvpair id="B1-meta_attributes-is-managed"
++                        name="is-managed" value="false" />
++                    </meta_attributes>
++                    <docker image="pcs:test" masters="3" replicas="6"/>
++                </bundle>
++            </resources>
++        """
++        fixture_cib_post = """
++            <resources>
++                <bundle id="B1">
++                    <meta_attributes id="B1-meta_attributes">
++                    <nvpair id="B1-meta_attributes-target-role"
++                        name="target-role" value="Stopped" />
++                    <nvpair id="B1-meta_attributes-priority"
++                        name="priority" value="10" />
++                    <nvpair id="B1-meta_attributes-resource-stickiness"
++                        name="resource-stickiness" value="100" />
++                    </meta_attributes>
++                    <docker image="pcs:test" masters="3" replicas="6"/>
++                </bundle>
++            </resources>
++        """
++        self.assert_command_effect(
++            fixture_cib_pre,
++            lambda: resource.bundle_update(
++                self.env, "B1",
++                meta_attributes={
++                    "priority": "10",
++                    "resource-stickiness": "100",
++                    "is-managed": "",
++                }
++            ),
++            fixture_cib_post
++        )
++
++
+ class Wait(CommonTest):
+     fixture_status_running = """
+         <resources>
+@@ -794,7 +884,7 @@ class Wait(CommonTest):
+         self.runner.assert_everything_launched()
+ 
+     @skip_unless_pacemaker_supports_bundle
+-    def test_wait_ok_run_ok(self):
++    def test_wait_ok_running(self):
+         self.runner.set_runs(
+             self.fixture_calls_initial() +
+             fixture.call_wait(self.timeout) +
+@@ -811,7 +901,7 @@ class Wait(CommonTest):
+         self.runner.assert_everything_launched()
+ 
+     @skip_unless_pacemaker_supports_bundle
+-    def test_wait_ok_run_fail(self):
++    def test_wait_ok_not_running(self):
+         self.runner.set_runs(
+             self.fixture_calls_initial() +
+             fixture.call_wait(self.timeout) +
+@@ -819,8 +909,8 @@ class Wait(CommonTest):
+                 self.fixture_status_not_running
+             ))
+         )
+-        assert_raise_library_error(
+-            lambda: self.simple_bundle_update(self.timeout),
+-            fixture.report_resource_not_running("B1", severities.ERROR),
+-        )
++        self.simple_bundle_update(self.timeout)
++        self.env.report_processor.assert_reports([
++            fixture.report_resource_not_running("B1", severities.INFO),
++        ])
+         self.runner.assert_everything_launched()
+diff --git a/pcs/lib/commands/test/resource/test_resource_enable_disable.py b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
+index 91ac068..b03740b 100644
+--- a/pcs/lib/commands/test/resource/test_resource_enable_disable.py
++++ b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
+@@ -469,6 +469,35 @@ fixture_bundle_cib_disabled_primitive = """
+         </bundle>
+     </resources>
+ """
++fixture_bundle_cib_disabled_bundle = """
++    <resources>
++        <bundle id="A-bundle">
++            <meta_attributes id="A-bundle-meta_attributes">
++                <nvpair id="A-bundle-meta_attributes-target-role"
++                    name="target-role" value="Stopped" />
++            </meta_attributes>
++            <docker image="pcs:test" />
++            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy" />
++        </bundle>
++    </resources>
++"""
++fixture_bundle_cib_disabled_both = """
++    <resources>
++        <bundle id="A-bundle">
++            <meta_attributes id="A-bundle-meta_attributes">
++                <nvpair id="A-bundle-meta_attributes-target-role"
++                    name="target-role" value="Stopped" />
++            </meta_attributes>
++            <docker image="pcs:test" />
++            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
++                <meta_attributes id="A-meta_attributes">
++                    <nvpair id="A-meta_attributes-target-role"
++                        name="target-role" value="Stopped" />
++                </meta_attributes>
++            </primitive>
++        </bundle>
++    </resources>
++"""
+ fixture_bundle_status_managed = """
+     <resources>
+         <bundle id="A-bundle" type="docker" image="pcmktest:http"
+@@ -486,7 +515,7 @@ fixture_bundle_status_managed = """
+ fixture_bundle_status_unmanaged = """
+     <resources>
+         <bundle id="A-bundle" type="docker" image="pcmktest:http"
+-            unique="false" managed="true" failed="false"
++            unique="false" managed="false" failed="false"
+         >
+             <replica id="0">
+                 <resource id="A" managed="false" />
+@@ -1460,17 +1489,12 @@ class DisableBundle(ResourceWithStateTest):
+         )
+ 
+     def test_bundle(self):
+-        self.runner.set_runs(
+-            fixture.call_cib_load(
+-                fixture.cib_resources(fixture_bundle_cib_enabled)
+-            )
+-        )
+-
+-        assert_raise_library_error(
++        self.assert_command_effect(
++            fixture_bundle_cib_enabled,
++            fixture_bundle_status_managed,
+             lambda: resource.disable(self.env, ["A-bundle"], False),
+-            fixture.report_not_for_bundles("A-bundle")
++            fixture_bundle_cib_disabled_bundle
+         )
+-        self.runner.assert_everything_launched()
+ 
+     def test_primitive_unmanaged(self):
+         self.assert_command_effect(
+@@ -1483,6 +1507,17 @@ class DisableBundle(ResourceWithStateTest):
+             ]
+         )
+ 
++    def test_bundle_unmanaged(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_enabled,
++            fixture_bundle_status_unmanaged,
++            lambda: resource.disable(self.env, ["A-bundle"], False),
++            fixture_bundle_cib_disabled_bundle,
++            reports=[
++                fixture_report_unmanaged("A-bundle"),
++            ]
++        )
++
+ 
+ @skip_unless_pacemaker_supports_bundle
+ class EnableBundle(ResourceWithStateTest):
+@@ -1494,18 +1529,29 @@ class EnableBundle(ResourceWithStateTest):
+             fixture_bundle_cib_enabled
+         )
+ 
++    def test_primitive_disabled_both(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_disabled_both,
++            fixture_bundle_status_managed,
++            lambda: resource.enable(self.env, ["A"], False),
++            fixture_bundle_cib_enabled
++        )
++
+     def test_bundle(self):
+-        self.runner.set_runs(
+-            fixture.call_cib_load(
+-                fixture.cib_resources(fixture_bundle_cib_enabled)
+-            )
++        self.assert_command_effect(
++            fixture_bundle_cib_disabled_bundle,
++            fixture_bundle_status_managed,
++            lambda: resource.enable(self.env, ["A-bundle"], False),
++            fixture_bundle_cib_enabled
+         )
+ 
+-        assert_raise_library_error(
++    def test_bundle_disabled_both(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_disabled_both,
++            fixture_bundle_status_managed,
+             lambda: resource.enable(self.env, ["A-bundle"], False),
+-            fixture.report_not_for_bundles("A-bundle")
++            fixture_bundle_cib_enabled
+         )
+-        self.runner.assert_everything_launched()
+ 
+     def test_primitive_unmanaged(self):
+         self.assert_command_effect(
+@@ -1515,5 +1561,18 @@ class EnableBundle(ResourceWithStateTest):
+             fixture_bundle_cib_enabled,
+             reports=[
+                 fixture_report_unmanaged("A"),
++                fixture_report_unmanaged("A-bundle"),
++            ]
++        )
++
++    def test_bundle_unmanaged(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_disabled_primitive,
++            fixture_bundle_status_unmanaged,
++            lambda: resource.enable(self.env, ["A-bundle"], False),
++            fixture_bundle_cib_enabled,
++            reports=[
++                fixture_report_unmanaged("A-bundle"),
++                fixture_report_unmanaged("A"),
+             ]
+         )
+diff --git a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
+index 6d8c787..95b44bc 100644
+--- a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
++++ b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
+@@ -517,6 +517,26 @@ fixture_clone_group_cib_unmanaged_all_primitives_op_disabled = """
+     </resources>
+ """
+ 
++
++fixture_bundle_empty_cib_managed = """
++    <resources>
++        <bundle id="A-bundle">
++            <docker image="pcs:test" />
++        </bundle>
++    </resources>
++"""
++fixture_bundle_empty_cib_unmanaged_bundle = """
++    <resources>
++        <bundle id="A-bundle">
++            <meta_attributes id="A-bundle-meta_attributes">
++                <nvpair id="A-bundle-meta_attributes-is-managed"
++                    name="is-managed" value="false" />
++            </meta_attributes>
++            <docker image="pcs:test" />
++        </bundle>
++    </resources>
++"""
++
+ fixture_bundle_cib_managed = """
+     <resources>
+         <bundle id="A-bundle">
+@@ -526,7 +546,19 @@ fixture_bundle_cib_managed = """
+         </bundle>
+     </resources>
+ """
+-
++fixture_bundle_cib_unmanaged_bundle = """
++    <resources>
++        <bundle id="A-bundle">
++            <meta_attributes id="A-bundle-meta_attributes">
++                <nvpair id="A-bundle-meta_attributes-is-managed"
++                    name="is-managed" value="false" />
++            </meta_attributes>
++            <docker image="pcs:test" />
++            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
++            </primitive>
++        </bundle>
++    </resources>
++"""
+ fixture_bundle_cib_unmanaged_primitive = """
+     <resources>
+         <bundle id="A-bundle">
+@@ -540,6 +572,78 @@ fixture_bundle_cib_unmanaged_primitive = """
+         </bundle>
+     </resources>
+ """
++fixture_bundle_cib_unmanaged_both = """
++    <resources>
++        <bundle id="A-bundle">
++            <meta_attributes id="A-bundle-meta_attributes">
++                <nvpair id="A-bundle-meta_attributes-is-managed"
++                    name="is-managed" value="false" />
++            </meta_attributes>
++            <docker image="pcs:test" />
++            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
++                <meta_attributes id="A-meta_attributes">
++                    <nvpair id="A-meta_attributes-is-managed"
++                        name="is-managed" value="false" />
++                </meta_attributes>
++            </primitive>
++        </bundle>
++    </resources>
++"""
++
++fixture_bundle_cib_managed_op_enabled = """
++    <resources>
++        <bundle id="A-bundle">
++            <docker image="pcs:test" />
++            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
++                <operations>
++                    <op id="A-start" name="start" />
++                    <op id="A-stop" name="stop" />
++                    <op id="A-monitor" name="monitor"/>
++                </operations>
++            </primitive>
++        </bundle>
++    </resources>
++"""
++fixture_bundle_cib_unmanaged_primitive_op_disabled = """
++    <resources>
++        <bundle id="A-bundle">
++            <docker image="pcs:test" />
++            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
++                <meta_attributes id="A-meta_attributes">
++                    <nvpair id="A-meta_attributes-is-managed"
++                        name="is-managed" value="false" />
++                </meta_attributes>
++                <operations>
++                    <op id="A-start" name="start" />
++                    <op id="A-stop" name="stop" />
++                    <op id="A-monitor" name="monitor" enabled="false"/>
++                </operations>
++            </primitive>
++        </bundle>
++    </resources>
++"""
++fixture_bundle_cib_unmanaged_both_op_disabled = """
++    <resources>
++        <bundle id="A-bundle">
++            <meta_attributes id="A-bundle-meta_attributes">
++                <nvpair id="A-bundle-meta_attributes-is-managed"
++                    name="is-managed" value="false" />
++            </meta_attributes>
++            <docker image="pcs:test" />
++            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
++                <meta_attributes id="A-meta_attributes">
++                    <nvpair id="A-meta_attributes-is-managed"
++                        name="is-managed" value="false" />
++                </meta_attributes>
++                <operations>
++                    <op id="A-start" name="start" />
++                    <op id="A-stop" name="stop" />
++                    <op id="A-monitor" name="monitor" enabled="false"/>
++                </operations>
++            </primitive>
++        </bundle>
++    </resources>
++"""
+ 
+ def fixture_report_no_monitors(resource):
+     return (
+@@ -852,17 +956,18 @@ class UnmanageBundle(ResourceWithoutStateTest):
+         )
+ 
+     def test_bundle(self):
+-        self.runner.set_runs(
+-            fixture.call_cib_load(
+-                fixture.cib_resources(fixture_bundle_cib_managed)
+-            )
++        self.assert_command_effect(
++            fixture_bundle_cib_managed,
++            lambda: resource.unmanage(self.env, ["A-bundle"]),
++            fixture_bundle_cib_unmanaged_both
+         )
+ 
+-        assert_raise_library_error(
+-            lambda: resource.unmanage(self.env, ["A-bundle"], False),
+-            fixture.report_not_for_bundles("A-bundle")
++    def test_bundle_empty(self):
++        self.assert_command_effect(
++            fixture_bundle_empty_cib_managed,
++            lambda: resource.unmanage(self.env, ["A-bundle"]),
++            fixture_bundle_empty_cib_unmanaged_bundle
+         )
+-        self.runner.assert_everything_launched()
+ 
+ 
+ class ManageBundle(ResourceWithoutStateTest):
+@@ -873,18 +978,47 @@ class ManageBundle(ResourceWithoutStateTest):
+             fixture_bundle_cib_managed,
+         )
+ 
++    def test_primitive_unmanaged_bundle(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_unmanaged_bundle,
++            lambda: resource.manage(self.env, ["A"]),
++            fixture_bundle_cib_managed,
++        )
++
++    def test_primitive_unmanaged_both(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_unmanaged_both,
++            lambda: resource.manage(self.env, ["A"]),
++            fixture_bundle_cib_managed,
++        )
++
+     def test_bundle(self):
+-        self.runner.set_runs(
+-            fixture.call_cib_load(
+-                fixture.cib_resources(fixture_bundle_cib_unmanaged_primitive)
+-            )
++        self.assert_command_effect(
++            fixture_bundle_cib_unmanaged_bundle,
++            lambda: resource.manage(self.env, ["A-bundle"]),
++            fixture_bundle_cib_managed,
+         )
+ 
+-        assert_raise_library_error(
+-            lambda: resource.manage(self.env, ["A-bundle"], False),
+-            fixture.report_not_for_bundles("A-bundle")
++    def test_bundle_unmanaged_primitive(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_unmanaged_primitive,
++            lambda: resource.manage(self.env, ["A-bundle"]),
++            fixture_bundle_cib_managed,
++        )
++
++    def test_bundle_unmanaged_both(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_unmanaged_both,
++            lambda: resource.manage(self.env, ["A-bundle"]),
++            fixture_bundle_cib_managed,
++        )
++
++    def test_bundle_empty(self):
++        self.assert_command_effect(
++            fixture_bundle_empty_cib_unmanaged_bundle,
++            lambda: resource.manage(self.env, ["A-bundle"]),
++            fixture_bundle_empty_cib_managed
+         )
+-        self.runner.assert_everything_launched()
+ 
+ 
+ class MoreResources(ResourceWithoutStateTest):
+@@ -1090,3 +1224,24 @@ class WithMonitor(ResourceWithoutStateTest):
+             lambda: resource.unmanage(self.env, ["A1"], True),
+             fixture_clone_group_cib_unmanaged_primitive_op_disabled
+         )
++
++    def test_unmanage_bundle(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_managed_op_enabled,
++            lambda: resource.unmanage(self.env, ["A-bundle"], True),
++            fixture_bundle_cib_unmanaged_both_op_disabled
++        )
++
++    def test_unmanage_in_bundle(self):
++        self.assert_command_effect(
++            fixture_bundle_cib_managed_op_enabled,
++            lambda: resource.unmanage(self.env, ["A"], True),
++            fixture_bundle_cib_unmanaged_primitive_op_disabled
++        )
++
++    def test_unmanage_bundle_empty(self):
++        self.assert_command_effect(
++            fixture_bundle_empty_cib_managed,
++            lambda: resource.unmanage(self.env, ["A-bundle"], True),
++            fixture_bundle_empty_cib_unmanaged_bundle
++        )
+diff --git a/pcs/lib/pacemaker/state.py b/pcs/lib/pacemaker/state.py
+index 71809db..be3e7ad 100644
+--- a/pcs/lib/pacemaker/state.py
++++ b/pcs/lib/pacemaker/state.py
+@@ -201,6 +201,25 @@ def _get_primitive_roles_with_nodes(primitive_el_list):
+         for role, nodes in roles_with_nodes.items()
+     ])
+ 
++def info_resource_state(cluster_state, resource_id):
++    roles_with_nodes = _get_primitive_roles_with_nodes(
++        _get_primitives_for_state_check(
++            cluster_state,
++            resource_id,
++            expected_running=True
++        )
++    )
++    if not roles_with_nodes:
++        return reports.resource_does_not_run(
++            resource_id,
++            severities.INFO
++        )
++    return reports.resource_running_on_nodes(
++        resource_id,
++        roles_with_nodes,
++        severities.INFO
++    )
++
+ def ensure_resource_state(expected_running, cluster_state, resource_id):
+     roles_with_nodes = _get_primitive_roles_with_nodes(
+         _get_primitives_for_state_check(
+@@ -244,18 +263,25 @@ def is_resource_managed(cluster_state, resource_id):
+         for primitive in primitive_list:
+             if is_false(primitive.attrib.get("managed", "")):
+                 return False
+-            clone = find_parent(primitive, ["clone"])
+-            if clone is not None and is_false(clone.attrib.get("managed", "")):
++            parent = find_parent(primitive, ["clone", "bundle"])
++            if (
++                parent is not None
++                and
++                is_false(parent.attrib.get("managed", ""))
++            ):
+                 return False
+         return True
+ 
+-    clone_list = cluster_state.xpath(
+-        """.//clone[@id="{0}"]""".format(resource_id)
++    parent_list = cluster_state.xpath("""
++        .//clone[@id="{0}"]
++        |
++        .//bundle[@id="{0}"]
++        """.format(resource_id)
+     )
+-    for clone in clone_list:
+-        if is_false(clone.attrib.get("managed", "")):
++    for parent in parent_list:
++        if is_false(parent.attrib.get("managed", "")):
+             return False
+-        for primitive in clone.xpath(".//resource"):
++        for primitive in parent.xpath(".//resource"):
+             if is_false(primitive.attrib.get("managed", "")):
+                 return False
+         return True
+diff --git a/pcs/lib/pacemaker/test/test_state.py b/pcs/lib/pacemaker/test/test_state.py
+index a29eddf..5de9426 100644
+--- a/pcs/lib/pacemaker/test/test_state.py
++++ b/pcs/lib/pacemaker/test/test_state.py
+@@ -491,7 +491,7 @@ class GetPrimitivesForStateCheck(TestCase):
+         self.assert_primitives("B2-R2", ["B2-R2", "B2-R2"], False)
+ 
+ 
+-class EnsureResourceState(TestCase):
++class CommonResourceState(TestCase):
+     resource_id = "R"
+     def setUp(self):
+         self.cluster_state = "state"
+@@ -526,6 +526,8 @@ class EnsureResourceState(TestCase):
+             "resource_id": self.resource_id
+         })
+ 
++
++class EnsureResourceState(CommonResourceState):
+     def assert_running_info_transform(self, run_info, report, expected_running):
+         self.get_primitives_for_state_check.return_value = ["elem1", "elem2"]
+         self.get_primitive_roles_with_nodes.return_value = run_info
+@@ -575,6 +577,35 @@ class EnsureResourceState(TestCase):
+         )
+ 
+ 
++class InfoResourceState(CommonResourceState):
++    def assert_running_info_transform(self, run_info, report):
++        self.get_primitives_for_state_check.return_value = ["elem1", "elem2"]
++        self.get_primitive_roles_with_nodes.return_value = run_info
++        assert_report_item_equal(
++            state.info_resource_state(self.cluster_state, self.resource_id),
++            report
++        )
++        self.get_primitives_for_state_check.assert_called_once_with(
++            self.cluster_state,
++            self.resource_id,
++            expected_running=True
++        )
++        self.get_primitive_roles_with_nodes.assert_called_once_with(
++            ["elem1", "elem2"]
++        )
++
++    def test_report_info_running(self):
++        self.assert_running_info_transform(
++            self.fixture_running_state_info(),
++            self.fixture_running_report(severities.INFO)
++        )
++    def test_report_info_not_running(self):
++        self.assert_running_info_transform(
++            [],
++            self.fixture_not_running_report(severities.INFO)
++        )
++
++
+ class IsResourceManaged(TestCase):
+     status_xml = etree.fromstring("""
+         <resources>
+@@ -733,6 +764,60 @@ class IsResourceManaged(TestCase):
+                     <resource id="R38:1" managed="false" />
+                 </group>
+             </clone>
++
++            <bundle id="B1" managed="true" />
++            <bundle id="B2" managed="false" />
++
++            <bundle id="B3" managed="true">
++                <replica id="0">
++                    <resource id="R39" managed="true" />
++                    <resource id="R40" managed="true" />
++                </replica>
++                <replica id="1">
++                    <resource id="R39" managed="true" />
++                    <resource id="R40" managed="true" />
++                </replica>
++            </bundle>
++            <bundle id="B4" managed="false">
++                <replica id="0">
++                    <resource id="R41" managed="true" />
++                    <resource id="R42" managed="true" />
++                </replica>
++                <replica id="1">
++                    <resource id="R41" managed="true" />
++                    <resource id="R42" managed="true" />
++                </replica>
++            </bundle>
++            <bundle id="B5" managed="true">
++                <replica id="0">
++                    <resource id="R43" managed="false" />
++                    <resource id="R44" managed="true" />
++                </replica>
++                <replica id="1">
++                    <resource id="R43" managed="false" />
++                    <resource id="R44" managed="true" />
++                </replica>
++            </bundle>
++            <bundle id="B6" managed="true">
++                <replica id="0">
++                    <resource id="R45" managed="true" />
++                    <resource id="R46" managed="false" />
++                </replica>
++                <replica id="1">
++                    <resource id="R45" managed="true" />
++                    <resource id="R46" managed="false" />
++                </replica>
++            </bundle>
++            <bundle id="B7" managed="false">
++                <replica id="0">
++                    <resource id="R47" managed="false" />
++                    <resource id="R48" managed="false" />
++                </replica>
++                <replica id="1">
++                    <resource id="R47" managed="false" />
++                    <resource id="R48" managed="false" />
++                </replica>
++            </bundle>
+         </resources>
+     """)
+ 
+@@ -856,3 +941,24 @@ class IsResourceManaged(TestCase):
+         self.assert_managed("R36", False)
+         self.assert_managed("R37", False)
+         self.assert_managed("R38", False)
++
++    def test_bundle(self):
++        self.assert_managed("B1", True)
++        self.assert_managed("B2", False)
++        self.assert_managed("B3", True)
++        self.assert_managed("B4", False)
++        self.assert_managed("B5", False)
++        self.assert_managed("B6", False)
++        self.assert_managed("B7", False)
++
++    def test_primitive_in_bundle(self):
++        self.assert_managed("R39", True)
++        self.assert_managed("R40", True)
++        self.assert_managed("R41", False)
++        self.assert_managed("R42", False)
++        self.assert_managed("R43", False)
++        self.assert_managed("R44", True)
++        self.assert_managed("R45", True)
++        self.assert_managed("R46", False)
++        self.assert_managed("R47", False)
++        self.assert_managed("R48", False)
+diff --git a/pcs/pcs.8 b/pcs/pcs.8
+index 446e7b3..20b5c2e 100644
+--- a/pcs/pcs.8
++++ b/pcs/pcs.8
+@@ -162,10 +162,10 @@ Remove the clone which contains the specified group or resource (the resource or
+ master [<master/slave id>] <resource id | group id> [options] [\fB\-\-wait\fR[=n]]
+ Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.  Note: to remove a master you must remove the resource/group it contains.
+ .TP
+-bundle create <bundle id> [container [<container type>] <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [\fB\-\-wait\fR[=n]]
+-Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If the container type is not specified, it defaults to 'docker'.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
++bundle create <bundle id> [container [<container type>] <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
++Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If the container type is not specified, it defaults to 'docker'. If \fB\-\-disabled\fR is specified, the bundle is not started automatically. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
+ .TP
+-bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [\fB\-\-wait\fR[=n]]
++bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [meta <meta options>] [\fB\-\-wait\fR[=n]]
+ Add, remove or change options to specified bundle. If you wish to update a resource encapsulated in the bundle, use the 'pcs resource update' command instead and specify the resource id.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
+ .TP
+ manage <resource id>... [\fB\-\-monitor\fR]
+diff --git a/pcs/resource.py b/pcs/resource.py
+index dc6da13..467faa5 100644
+--- a/pcs/resource.py
++++ b/pcs/resource.py
+@@ -20,7 +20,7 @@ from pcs import (
+ )
+ from pcs.settings import pacemaker_wait_timeout_status as \
+     PACEMAKER_WAIT_TIMEOUT_STATUS
+-import pcs.lib.cib.acl as lib_acl
++from pcs.cli.common.console_report import error, warn
+ from pcs.cli.common.errors import CmdLineInputError
+ from pcs.cli.common.parse_args import prepare_options
+ from pcs.cli.resource.parse_args import (
+@@ -28,16 +28,21 @@ from pcs.cli.resource.parse_args import (
+     parse_bundle_update_options,
+     parse_create as parse_create_args,
+ )
+-from pcs.lib.errors import LibraryError
++import pcs.lib.cib.acl as lib_acl
+ from pcs.lib.cib.resource import guest_node
+-import pcs.lib.pacemaker.live as lib_pacemaker
+-from pcs.lib.pacemaker.values import timeout_to_seconds
+-import pcs.lib.resource_agent as lib_ra
+-from pcs.cli.common.console_report import error, warn
+ from pcs.lib.commands.resource import(
+     _validate_guest_change,
+     _get_nodes_to_validate_against,
+ )
++from pcs.lib.errors import LibraryError
++import pcs.lib.pacemaker.live as lib_pacemaker
++from pcs.lib.pacemaker.state import (
++    get_cluster_state_dom,
++    _get_primitive_roles_with_nodes,
++    _get_primitives_for_state_check,
++)
++from pcs.lib.pacemaker.values import timeout_to_seconds
++import pcs.lib.resource_agent as lib_ra
+ 
+ 
+ RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
+@@ -1432,6 +1437,18 @@ def resource_master_create(dom, argv, update=False, master_id=None):
+     return dom, master_element.getAttribute("id")
+ 
+ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
++    def is_bundle_running(bundle_id):
++        roles_with_nodes = _get_primitive_roles_with_nodes(
++            _get_primitives_for_state_check(
++                get_cluster_state_dom(
++                    lib_pacemaker.get_cluster_status_xml(utils.cmd_runner())
++                ),
++                bundle_id,
++                expected_running=True
++            )
++        )
++        return True if roles_with_nodes else False
++
+     dom = utils.get_cib_dom()
+     # if resource is a clone or a master, work with its child instead
+     cloned_resource = utils.dom_get_clone_ms_resource(dom, resource_id)
+@@ -1441,6 +1458,40 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
+     bundle = utils.dom_get_bundle(dom, resource_id)
+     if bundle is not None:
+         primitive_el = utils.dom_get_resource_bundle(bundle)
++        if primitive_el is None:
++            print("Deleting bundle '{0}'".format(resource_id))
++        else:
++            print(
++                "Deleting bundle '{0}' and its inner resource '{1}'".format(
++                    resource_id,
++                    primitive_el.getAttribute("id")
++                )
++            )
++
++        if (
++            "--force" not in utils.pcs_options
++            and
++            not utils.usefile
++            and
++            is_bundle_running(resource_id)
++        ):
++            sys.stdout.write("Stopping bundle '{0}'... ".format(resource_id))
++            sys.stdout.flush()
++            lib = utils.get_library_wrapper()
++            lib.resource.disable([resource_id], False)
++            output, retval = utils.run(["crm_resource", "--wait"])
++            # pacemaker which supports bundles supports --wait as well
++            if is_bundle_running(resource_id):
++                msg = [
++                    "Unable to stop: %s before deleting "
++                    "(re-run with --force to force deletion)"
++                    % resource_id
++                ]
++                if retval != 0 and output:
++                    msg.append("\n" + output)
++                utils.err("\n".join(msg).strip())
++            print("Stopped")
++
+         if primitive_el is not None:
+             resource_remove(primitive_el.getAttribute("id"))
+         utils.replace_cib_configuration(
+@@ -1498,7 +1549,7 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
+             resource_remove(res.getAttribute("id"))
+         sys.exit(0)
+ 
+-    # now we know resource is not a group, a clone nor a master
++    # now we know resource is not a group, a clone, a master nor a bundle
+     # because of the conditions above
+     if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'):
+         utils.err("Resource '{0}' does not exist.".format(resource_id))
+@@ -1517,7 +1568,7 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
+         and
+         utils.resource_running_on(resource_id)["is_running"]
+     ):
+-        sys.stdout.write("Attempting to stop: "+ resource_id + "...")
++        sys.stdout.write("Attempting to stop: "+ resource_id + "... ")
+         sys.stdout.flush()
+         lib = utils.get_library_wrapper()
+         # we are not using wait from disable command, because if wait is not
+@@ -2246,6 +2297,7 @@ def print_node(node, tab = 0):
+             node.findall("storage/storage-mapping"),
+             spaces + " "
+         )
++        print_meta_vars_string(node, spaces)
+         for child in node:
+             print_node(child, tab + 1)
+         return
+@@ -2675,12 +2727,14 @@ def resource_bundle_create_cmd(lib, argv, modifiers):
+     lib.resource.bundle_create(
+         bundle_id,
+         parts["container_type"],
+-        parts["container"],
+-        parts["network"],
+-        parts["port_map"],
+-        parts["storage_map"],
+-        modifiers["force"],
+-        modifiers["wait"]
++        container_options=parts["container"],
++        network_options=parts["network"],
++        port_map=parts["port_map"],
++        storage_map=parts["storage_map"],
++        meta_attributes=parts["meta"],
++        force_options=modifiers["force"],
++        ensure_disabled=modifiers["disabled"],
++        wait=modifiers["wait"]
+     )
+ 
+ def resource_bundle_update_cmd(lib, argv, modifiers):
+@@ -2691,12 +2745,13 @@ def resource_bundle_update_cmd(lib, argv, modifiers):
+     parts = parse_bundle_update_options(argv[1:])
+     lib.resource.bundle_update(
+         bundle_id,
+-        parts["container"],
+-        parts["network"],
+-        parts["port_map_add"],
+-        parts["port_map_remove"],
+-        parts["storage_map_add"],
+-        parts["storage_map_remove"],
+-        modifiers["force"],
+-        modifiers["wait"]
++        container_options=parts["container"],
++        network_options=parts["network"],
++        port_map_add=parts["port_map_add"],
++        port_map_remove=parts["port_map_remove"],
++        storage_map_add=parts["storage_map_add"],
++        storage_map_remove=parts["storage_map_remove"],
++        meta_attributes=parts["meta"],
++        force_options=modifiers["force"],
++        wait=modifiers["wait"]
+     )
+diff --git a/pcs/test/cib_resource/test_bundle.py b/pcs/test/cib_resource/test_bundle.py
+index d8c97c6..29e4339 100644
+--- a/pcs/test/cib_resource/test_bundle.py
++++ b/pcs/test/cib_resource/test_bundle.py
+@@ -75,6 +75,7 @@ class BundleCreate(BundleCreateCommon):
+                 resource bundle create B1
+                 container replicas=4 replicas-per-host=2 run-command=/bin/true
+                 port-map port=1001
++                meta target-role=Stopped
+                 network control-port=12345 host-interface=eth0 host-netmask=24
+                 port-map id=B1-port-map-1001 internal-port=2002 port=2000
+                 port-map range=3000-3300
+@@ -83,6 +84,7 @@ class BundleCreate(BundleCreateCommon):
+                 storage-map id=B1-storage-map source-dir=/tmp/docker2a
+                     target-dir=/tmp/docker2b
+                 container image=pcs:test masters=0
++                meta is-managed=false
+                 storage-map source-dir-root=/tmp/docker3a
+                     target-dir=/tmp/docker3b
+                 storage-map id=B1-port-map-1001-1 source-dir-root=/tmp/docker4a
+@@ -140,6 +142,18 @@ class BundleCreate(BundleCreateCommon):
+                                 target-dir="/tmp/docker4b"
+                             />
+                         </storage>
++                        <meta_attributes id="B1-meta_attributes">
++                            <nvpair
++                                id="B1-meta_attributes-is-managed"
++                                name="is-managed"
++                                value="false"
++                            />
++                            <nvpair
++                                id="B1-meta_attributes-target-role"
++                                name="target-role"
++                                value="Stopped"
++                            />
++                        </meta_attributes>
+                     </bundle>
+                 </resources>
+             """
+@@ -215,6 +229,9 @@ class BundleCreate(BundleCreateCommon):
+     def test_empty_port_map(self):
+         self.assert_no_options("port-map")
+ 
++    def test_empty_meta(self):
++        self.assert_no_options("meta")
++
+ 
+ @skip_unless_pacemaker_supports_bundle
+ class BundleUpdate(BundleCreateCommon):
+@@ -239,6 +256,7 @@ class BundleUpdate(BundleCreateCommon):
+                 "storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b "
+                 "storage-map source-dir=/tmp/docker2a target-dir=/tmp/docker2b "
+                 "storage-map source-dir=/tmp/docker3a target-dir=/tmp/docker3b "
++                "meta priority=15 resource-stickiness=100 is-managed=false "
+             ).format(name)
+         )
+ 
+@@ -282,6 +300,7 @@ class BundleUpdate(BundleCreateCommon):
+                 port-map add internal-port=1003 port=2003
+                 storage-map remove B-storage-map B-storage-map-2
+                 storage-map add source-dir=/tmp/docker4a target-dir=/tmp/docker4b
++                meta priority=10 is-managed= target-role=Stopped
+             """,
+             """
+                 <resources>
+@@ -319,6 +338,14 @@ class BundleUpdate(BundleCreateCommon):
+                                 target-dir="/tmp/docker4b"
+                             />
+                         </storage>
++                        <meta_attributes id="B-meta_attributes">
++                            <nvpair id="B-meta_attributes-priority"
++                                name="priority" value="10" />
++                            <nvpair id="B-meta_attributes-resource-stickiness"
++                                name="resource-stickiness" value="100" />
++                            <nvpair id="B-meta_attributes-target-role"
++                                name="target-role" value="Stopped" />
++                        </meta_attributes>
+                     </bundle>
+                 </resources>
+             """
+@@ -373,6 +400,9 @@ class BundleUpdate(BundleCreateCommon):
+     def test_empty_port_map(self):
+         self.assert_no_options("port-map")
+ 
++    def test_empty_meta(self):
++        self.assert_no_options("meta")
++
+ 
+ @skip_unless_pacemaker_supports_bundle
+ class BundleShow(TestCase, AssertPcsMixin):
+@@ -463,6 +493,35 @@ class BundleShow(TestCase, AssertPcsMixin):
+             """
+         ))
+ 
++    def test_meta(self):
++        self.assert_pcs_success(
++            "resource bundle create B1 container image=pcs:test --disabled"
++        )
++        self.assert_pcs_success("resource show B1", outdent(
++            # pylint:disable=trailing-whitespace
++            """\
++             Bundle: B1
++              Docker: image=pcs:test
++              Meta Attrs: target-role=Stopped 
++            """
++        ))
++
++    def test_resource(self):
++        self.assert_pcs_success(
++            "resource bundle create B1 container image=pcs:test"
++        )
++        self.assert_pcs_success(
++            "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
++        )
++        self.assert_pcs_success("resource show B1", outdent(
++            """\
++             Bundle: B1
++              Docker: image=pcs:test
++              Resource: A (class=ocf provider=pacemaker type=Dummy)
++               Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
++            """
++        ))
++
+     def test_all(self):
+         self.assert_pcs_success(
+             """
+@@ -474,9 +533,14 @@ class BundleShow(TestCase, AssertPcsMixin):
+                 storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
+                 storage-map id=my-storage-map source-dir=/tmp/docker2a
+                     target-dir=/tmp/docker2b
++                meta target-role=Stopped is-managed=false
+             """
+         )
++        self.assert_pcs_success(
++            "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
++        )
+         self.assert_pcs_success("resource show B1", outdent(
++            # pylint:disable=trailing-whitespace
+             """\
+              Bundle: B1
+               Docker: image=pcs:test masters=2 options="a b c" replicas=4
+@@ -487,5 +551,8 @@ class BundleShow(TestCase, AssertPcsMixin):
+               Storage Mapping:
+                source-dir=/tmp/docker1a target-dir=/tmp/docker1b (B1-storage-map)
+                source-dir=/tmp/docker2a target-dir=/tmp/docker2b (my-storage-map)
++              Meta Attrs: is-managed=false target-role=Stopped 
++              Resource: A (class=ocf provider=pacemaker type=Dummy)
++               Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
+             """
+         ))
+diff --git a/pcs/test/cib_resource/test_manage_unmanage.py b/pcs/test/cib_resource/test_manage_unmanage.py
+index 5b78646..2a87cd3 100644
+--- a/pcs/test/cib_resource/test_manage_unmanage.py
++++ b/pcs/test/cib_resource/test_manage_unmanage.py
+@@ -18,6 +18,7 @@ class ManageUnmanage(
+     TestCase,
+     get_assert_pcs_effect_mixin(
+         lambda cib: etree.tostring(
++            # pylint:disable=undefined-variable
+             etree.parse(cib).findall(".//resources")[0]
+         )
+     )
+@@ -234,7 +235,7 @@ class ManageUnmanage(
+ 
+         self.assert_pcs_fail(
+             "resource unmanage A B",
+-            "Error: resource/clone/master/group 'B' does not exist\n"
++            "Error: resource/clone/master/group/bundle 'B' does not exist\n"
+         )
+         self.assert_resources_xml_in_cib(
+             """
+@@ -255,7 +256,7 @@ class ManageUnmanage(
+ 
+         self.assert_pcs_fail(
+             "resource manage A B",
+-            "Error: resource/clone/master/group 'B' does not exist\n"
++            "Error: resource/clone/master/group/bundle 'B' does not exist\n"
+         )
+         self.assert_resources_xml_in_cib(
+             """
+diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
+index 96eae8f..4bdc194 100644
+--- a/pcs/test/test_resource.py
++++ b/pcs/test/test_resource.py
+@@ -8,6 +8,7 @@ from __future__ import (
+ from lxml import etree
+ import re
+ import shutil
++from textwrap import dedent
+ 
+ from pcs.test.tools import pcs_unittest as unittest
+ from pcs.test.tools.assertions import AssertPcsMixin
+@@ -3321,11 +3322,11 @@ Error: Cannot remove more than one resource from cloned group
+ 
+         # bad resource name
+         o,r = pcs(temp_cib, "resource enable NoExist")
+-        ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
++        ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
+         assert r == 1
+ 
+         o,r = pcs(temp_cib, "resource disable NoExist")
+-        ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
++        ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
+         assert r == 1
+ 
+         # cloned group
+@@ -3829,7 +3830,7 @@ Error: Cannot remove more than one resource from cloned group
+ 
+         self.assert_pcs_fail_regardless_of_force(
+             "resource enable dummy3 dummyX",
+-            "Error: resource/clone/master/group 'dummyX' does not exist\n"
++            "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
+         )
+         self.assert_pcs_success(
+             "resource show --full",
+@@ -3849,7 +3850,7 @@ Error: Cannot remove more than one resource from cloned group
+ 
+         self.assert_pcs_fail_regardless_of_force(
+             "resource disable dummy1 dummyX",
+-            "Error: resource/clone/master/group 'dummyX' does not exist\n"
++            "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
+         )
+         self.assert_pcs_success(
+             "resource show --full",
+@@ -4719,7 +4720,11 @@ class BundleCommon(
+ class BundleDeleteTest(BundleCommon):
+     def test_without_primitive(self):
+         self.fixture_bundle("B")
+-        self.assert_effect("resource delete B", "<resources/>")
++        self.assert_effect(
++            "resource delete B",
++            "<resources/>",
++            "Deleting bundle 'B'\n"
++        )
+ 
+     def test_with_primitive(self):
+         self.fixture_bundle("B")
+@@ -4727,7 +4732,10 @@ class BundleDeleteTest(BundleCommon):
+         self.assert_effect(
+             "resource delete B",
+             "<resources/>",
+-            "Deleting Resource - R\n",
++            dedent("""\
++                Deleting bundle 'B' and its inner resource 'R'
++                Deleting Resource - R
++            """),
+         )
+ 
+     def test_remove_primitive(self):
+@@ -4823,30 +4831,26 @@ class BundleCloneMaster(BundleCommon):
+ class BundleMiscCommands(BundleCommon):
+     def test_resource_enable_bundle(self):
+         self.fixture_bundle("B")
+-        self.assert_pcs_fail_regardless_of_force(
+-            "resource enable B",
+-            "Error: 'B' is not clone/master/a group/primitive\n"
++        self.assert_pcs_success(
++            "resource enable B"
+         )
+ 
+     def test_resource_disable_bundle(self):
+         self.fixture_bundle("B")
+-        self.assert_pcs_fail_regardless_of_force(
+-            "resource disable B",
+-            "Error: 'B' is not clone/master/a group/primitive\n"
++        self.assert_pcs_success(
++            "resource disable B"
+         )
+ 
+     def test_resource_manage_bundle(self):
+         self.fixture_bundle("B")
+-        self.assert_pcs_fail_regardless_of_force(
+-            "resource manage B",
+-            "Error: 'B' is not clone/master/a group/primitive\n"
++        self.assert_pcs_success(
++            "resource manage B"
+         )
+ 
+     def test_resource_unmanage_bundle(self):
+         self.fixture_bundle("B")
+-        self.assert_pcs_fail_regardless_of_force(
+-            "resource unmanage B",
+-            "Error: 'B' is not clone/master/a group/primitive\n"
++        self.assert_pcs_success(
++            "resource unmanage B"
+         )
+ 
+     def test_op_add(self):
+diff --git a/pcs/usage.py b/pcs/usage.py
+index d2262a6..75cb118 100644
+--- a/pcs/usage.py
++++ b/pcs/usage.py
+@@ -430,10 +430,12 @@ Commands:
+ 
+     bundle create <bundle id> [container [<container type>] <container options>]
+             [network <network options>] [port-map <port options>]...
+-            [storage-map <storage options>]... [--wait[=n]]
++            [storage-map <storage options>]... [meta <meta options>]
++            [--disabled] [--wait[=n]]
+         Create a new bundle encapsulating no resources. The bundle can be used
+         either as it is or a resource may be put into it at any time.
+         If the container type is not specified, it defaults to 'docker'.
++        If --disabled is specified, the bundle is not started automatically.
+         If --wait is specified, pcs will wait up to 'n' seconds for the bundle
+         to start and then return 0 on success or 1 on error. If 'n' is not
+         specified it defaults to 60 minutes.
+@@ -442,13 +444,14 @@ Commands:
+             [network <network options>]
+             [port-map (add <port options>) | (remove <id>...)]...
+             [storage-map (add <storage options>) | (remove <id>...)]...
++            [meta <meta options>]
+             [--wait[=n]]
+         Add, remove or change options to specified bundle. If you wish to update
+         a resource encapsulated in the bundle, use the 'pcs resource update'
+-        command instead and specify the resource id.  If --wait is specified,
++        command instead and specify the resource id. If --wait is specified,
+         pcs will wait up to 'n' seconds for the operation to finish (including
+         moving resources if appropriate) and then return 0 on success or 1 on
+-        error.  If 'n' is not specified it defaults to 60 minutes.
++        error. If 'n' is not specified it defaults to 60 minutes.
+ 
+     manage <resource id>... [--monitor]
+         Set resources listed to managed mode (default). If --monitor is
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1458153-01-give-back-orig.-master-behav.-resource-create.patch b/SOURCES/bz1458153-01-give-back-orig.-master-behav.-resource-create.patch
new file mode 100644
index 0000000..590c8a0
--- /dev/null
+++ b/SOURCES/bz1458153-01-give-back-orig.-master-behav.-resource-create.patch
@@ -0,0 +1,571 @@
+From 9fadaf3189f3c19bf6c2802f7a4a2b9c8c6638c1 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Mon, 5 Jun 2017 17:13:41 +0200
+Subject: [PATCH] give back orig. --master behav. (resource create)
+
+---
+ pcs/cli/common/parse_args.py           |   8 +-
+ pcs/cli/common/test/test_parse_args.py |  34 ++++++-
+ pcs/resource.py                        |  19 ++++
+ pcs/test/cib_resource/test_create.py   | 181 ++++++++++++++++++++++++++-------
+ pcs/test/test_constraints.py           |  28 ++---
+ pcs/test/test_resource.py              |  10 +-
+ pcs/utils.py                           |   7 ++
+ 7 files changed, 228 insertions(+), 59 deletions(-)
+
+diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
+index d72a6d4..3cd96c9 100644
+--- a/pcs/cli/common/parse_args.py
++++ b/pcs/cli/common/parse_args.py
+@@ -300,7 +300,13 @@ def upgrade_args(arg_list):
+             and
+             args_without_options[:2] == ["resource", "create"]
+         ):
+-            upgraded_args.append("master")
++            #upgraded_args.append("master")
++
++            #We do not replace flag --master with keyword "master" here because
++            #we want to give a grace period to openstack that uses the original
++            #misbehavior.
++            #see https://bugzilla.redhat.com/show_bug.cgi?id=1458153
++            upgraded_args.append(arg)
+         else:
+             upgraded_args.append(arg)
+     return upgraded_args
+diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py
+index 5b79b85..1ce03b4 100644
+--- a/pcs/cli/common/test/test_parse_args.py
++++ b/pcs/cli/common/test/test_parse_args.py
+@@ -487,9 +487,21 @@ class UpgradeArgs(TestCase):
+             upgrade_args(["first", "--cloneopt=1", "second"])
+         )
+ 
+-    def test_upgrade_2dash_master_in_resource_create(self):
+-        self.assertEqual(
+-            ["resource", "create", "master", "second"],
++    # def test_upgrade_2dash_master_in_resource_create(self):
++    #     self.assertEqual(
++    #         ["resource", "create", "master", "second"],
++    #         upgrade_args(["resource", "create", "--master", "second"])
++    #     )
++
++    def test_do_not_upgrade_2dash_master_in_resource_create__original_behaviour(
++        self
++    ):
++        """
++        downstream temporary behaviour
++        fixes bz 1458153
++        """
++        self.assertEqual(
++            ["resource", "create", "--master", "second"],
+             upgrade_args(["resource", "create", "--master", "second"])
+         )
+ 
+@@ -499,10 +511,22 @@ class UpgradeArgs(TestCase):
+             upgrade_args(["first", "--master", "second"])
+         )
+ 
+-    def test_upgrade_2dash_master_in_resource_create_with_complications(self):
++    # def test_upgrade_2dash_master_in_resource_create_with_complications(self):
++    #     self.assertEqual(
++    #         [
++    #             "-f", "path/to/file", "resource", "-V", "create", "master",
++    #             "second"
++    #         ],
++    #         upgrade_args([
++    #             "-f", "path/to/file", "resource", "-V", "create", "--master",
++    #             "second"
++    #         ])
++    #     )
++
++    def test_no_upgrade_2dash_master_complications__original_behaviour(self):
+         self.assertEqual(
+             [
+-                "-f", "path/to/file", "resource", "-V", "create", "master",
++                "-f", "path/to/file", "resource", "-V", "create", "--master",
+                 "second"
+             ],
+             upgrade_args([
+diff --git a/pcs/resource.py b/pcs/resource.py
+index 467faa5..818cb5b 100644
+--- a/pcs/resource.py
++++ b/pcs/resource.py
+@@ -375,6 +375,25 @@ def resource_create(lib, argv, modifiers):
+     ra_type = argv[1]
+ 
+     parts = parse_create_args(argv[2:])
++
++    if modifiers["master"] and "master" in parts:
++        raise error("you cannot specify both --master and master")
++
++    #This is for `pcs resource create`. Fix of the bug
++    #https://bugzilla.redhat.com/show_bug.cgi?id=1378107
++    #caused problems in openstack which uses `pcs resource create`
++    #see https://bugzilla.redhat.com/show_bug.cgi?id=1458153
++    #so we give back the original misbehavior of master here temporarily.
++    #When user uses `--master` she gets the original behaviour. With `master`
++    #she gets new behaviour.
++    if modifiers["master"]:
++        warn(
++            "flag '--master' is deprecated, use keyword 'master' instead (see"
++            " the usage)"
++        )
++        parts["master"] = parts["meta"]
++        parts["meta"] = {}
++
+     parts_sections = ["clone", "master", "bundle"]
+     defined_options = [opt for opt in parts_sections if opt in parts]
+     if modifiers["group"]:
+diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
+index 2492ba9..045ce68 100644
+--- a/pcs/test/cib_resource/test_create.py
++++ b/pcs/test/cib_resource/test_create.py
+@@ -223,7 +223,7 @@ class Success(ResourceTest):
+     def test_with_master(self):
+         self.assert_effect(
+             [
+-                "resource create R ocf:heartbeat:Dummy --no-default-ops --master",
++                # "resource create R ocf:heartbeat:Dummy --no-default-ops --master",
+                 "resource create R ocf:heartbeat:Dummy --no-default-ops master",
+             ],
+             """<resources>
+@@ -609,7 +609,7 @@ class SuccessGroup(ResourceTest):
+ class SuccessMaster(ResourceTest):
+     def test_disable_is_on_master_element(self):
+         self.assert_effect(
+-            "resource create R ocf:heartbeat:Dummy --no-default-ops --disabled --master",
++            "resource create R ocf:heartbeat:Dummy --no-default-ops --disabled master",
+             """<resources>
+                 <master id="R-master">
+                     <meta_attributes id="R-master-meta_attributes">
+@@ -630,13 +630,55 @@ class SuccessMaster(ResourceTest):
+             </resources>"""
+         )
+ 
+-    def test_put_options_after_master_as_its_meta_fix_1(self):
++    # def test_put_options_after_master_as_its_meta_fix_1(self):
++    #     """
++    #     fixes bz 1378107 (do not use master options as primitive options)
++    #     """
++    #     self.assert_effect(
++    #         "resource create R ocf:heartbeat:Dummy state=a"
++    #             " --master is-managed=false --force"
++    #         ,
++    #         """<resources>
++    #             <master id="R-master">
++    #                 <primitive class="ocf" id="R" provider="heartbeat"
++    #                     type="Dummy"
++    #                 >
++    #                     <instance_attributes id="R-instance_attributes">
++    #                         <nvpair id="R-instance_attributes-state"
++    #                             name="state" value="a"
++    #                         />
++    #                     </instance_attributes>
++    #                     <operations>
++    #                         <op id="R-monitor-interval-10" interval="10"
++    #                             name="monitor" timeout="20"
++    #                         />
++    #                         <op id="R-start-interval-0s" interval="0s"
++    #                             name="start" timeout="20"
++    #                         />
++    #                         <op id="R-stop-interval-0s" interval="0s"
++    #                             name="stop" timeout="20"
++    #                         />
++    #                     </operations>
++    #                 </primitive>
++    #                 <meta_attributes id="R-master-meta_attributes">
++    #                     <nvpair id="R-master-meta_attributes-is-managed"
++    #                         name="is-managed" value="false"
++    #                 />
++    #                 </meta_attributes>
++    #             </master>
++    #         </resources>"""
++    #     )
++
++    def test_put_options_after_master_as_primitive_options__original_behaviour(
++        self
++    ):
+         """
+-        fixes bz 1378107 (do not use master options as primitive options)
++        downstream temporary behaviour
++        fixes bz 1458153
+         """
+         self.assert_effect(
+             "resource create R ocf:heartbeat:Dummy state=a"
+-                " --master is-managed=false --force"
++                " --master fake=false --force"
+             ,
+             """<resources>
+                 <master id="R-master">
+@@ -644,6 +686,9 @@ class SuccessMaster(ResourceTest):
+                         type="Dummy"
+                     >
+                         <instance_attributes id="R-instance_attributes">
++                            <nvpair id="R-instance_attributes-fake" name="fake"
++                                value="false"
++                            />
+                             <nvpair id="R-instance_attributes-state"
+                                 name="state" value="a"
+                             />
+@@ -660,22 +705,58 @@ class SuccessMaster(ResourceTest):
+                             />
+                         </operations>
+                     </primitive>
+-                    <meta_attributes id="R-master-meta_attributes">
+-                        <nvpair id="R-master-meta_attributes-is-managed"
+-                            name="is-managed" value="false"
+-                    />
+-                    </meta_attributes>
+                 </master>
+             </resources>"""
+-        )
+-
+-    def test_put_options_after_master_as_its_meta_fix_2(self):
++            ,
++            output="Warning: flag '--master' is deprecated, use keyword"
++                " 'master' instead (see the usage)\n"
++        )
++
++
++    # def test_put_options_after_master_as_its_meta_fix_2(self):
++    #     """
++    #     fixes bz 1378107 (do not use master options as operations)
++    #     """
++    #     self.assert_effect(
++    #         "resource create R ocf:heartbeat:Dummy state=a op monitor"
++    #             " interval=10s --master is-managed=false --force"
++    #             " --no-default-ops"
++    #         ,
++    #         """<resources>
++    #             <master id="R-master">
++    #                 <primitive class="ocf" id="R" provider="heartbeat"
++    #                     type="Dummy"
++    #                 >
++    #                     <instance_attributes id="R-instance_attributes">
++    #                         <nvpair id="R-instance_attributes-state"
++    #                             name="state" value="a"
++    #                         />
++    #                     </instance_attributes>
++    #                     <operations>
++    #                         <op id="R-monitor-interval-10s" interval="10s"
++    #                             name="monitor"
++    #                         />
++    #                     </operations>
++    #                 </primitive>
++    #                 <meta_attributes id="R-master-meta_attributes">
++    #                     <nvpair id="R-master-meta_attributes-is-managed"
++    #                         name="is-managed" value="false"
++    #                 />
++    #                 </meta_attributes>
++    #             </master>
++    #         </resources>"""
++    #     )
++
++    def test_put_options_after_master_as_operation_opts__original_behaviour(
++        self
++    ):
+         """
+-        fixes bz 1378107 (do not use master options as operations)
++        downstream temporary behaviour
++        fixes bz 1458153
+         """
+         self.assert_effect(
+             "resource create R ocf:heartbeat:Dummy state=a op monitor"
+-                " interval=10s --master is-managed=false --force"
++                " interval=10s --master timeout=3m --force"
+                 " --no-default-ops"
+             ,
+             """<resources>
+@@ -690,22 +771,53 @@ class SuccessMaster(ResourceTest):
+                         </instance_attributes>
+                         <operations>
+                             <op id="R-monitor-interval-10s" interval="10s"
+-                                name="monitor"
++                                name="monitor" timeout="3m"
+                             />
+                         </operations>
+                     </primitive>
+-                    <meta_attributes id="R-master-meta_attributes">
+-                        <nvpair id="R-master-meta_attributes-is-managed"
+-                            name="is-managed" value="false"
+-                    />
+-                    </meta_attributes>
+                 </master>
+             </resources>"""
+-        )
+-
+-    def test_do_not_steal_primitive_meta_options(self):
++            ,
++            output="Warning: flag '--master' is deprecated, use keyword"
++                " 'master' instead (see the usage)\n"
++        )
++
++    # def test_do_not_steal_primitive_meta_options(self):
++    #     """
++    #     fixes bz 1378107
++    #     """
++    #     self.assert_effect(
++    #         "resource create R ocf:heartbeat:Dummy meta a=b --master b=c"
++    #             " --no-default-ops"
++    #         ,
++    #         """<resources>
++    #             <master id="R-master">
++    #                 <primitive class="ocf" id="R" provider="heartbeat"
++    #                     type="Dummy"
++    #                 >
++    #                     <meta_attributes id="R-meta_attributes">
++    #                         <nvpair id="R-meta_attributes-a" name="a"
++    #                             value="b"
++    #                         />
++    #                     </meta_attributes>
++    #                     <operations>
++    #                         <op id="R-monitor-interval-10" interval="10"
++    #                             name="monitor" timeout="20"
++    #                         />
++    #                     </operations>
++    #                 </primitive>
++    #                 <meta_attributes id="R-master-meta_attributes">
++    #                     <nvpair id="R-master-meta_attributes-b" name="b"
++    #                         value="c"
++    #                     />
++    #                 </meta_attributes>
++    #             </master>
++    #         </resources>"""
++    #     )
++    def test_steals_primitive_meta_options__original_behaviour(self):
+         """
+-        fixes bz 1378107
++        downstream temporary behaviour
++        fixes bz 1458153
+         """
+         self.assert_effect(
+             "resource create R ocf:heartbeat:Dummy meta a=b --master b=c"
+@@ -716,11 +828,6 @@ class SuccessMaster(ResourceTest):
+                     <primitive class="ocf" id="R" provider="heartbeat"
+                         type="Dummy"
+                     >
+-                        <meta_attributes id="R-meta_attributes">
+-                            <nvpair id="R-meta_attributes-a" name="a"
+-                                value="b"
+-                            />
+-                        </meta_attributes>
+                         <operations>
+                             <op id="R-monitor-interval-10" interval="10"
+                                 name="monitor" timeout="20"
+@@ -728,18 +835,24 @@ class SuccessMaster(ResourceTest):
+                         </operations>
+                     </primitive>
+                     <meta_attributes id="R-master-meta_attributes">
++                        <nvpair id="R-master-meta_attributes-a" name="a"
++                            value="b"
++                        />
+                         <nvpair id="R-master-meta_attributes-b" name="b"
+                             value="c"
+                         />
+                     </meta_attributes>
+                 </master>
+             </resources>"""
++            ,
++            output="Warning: flag '--master' is deprecated, use keyword"
++                " 'master' instead (see the usage)\n"
+         )
+ 
+     def test_takes_master_meta_attributes(self):
+         self.assert_effect(
+             "resource create --no-default-ops R ocf:heartbeat:IPaddr2"
+-                " ip=192.168.0.99 --master cidr_netmask=32"
++                " ip=192.168.0.99 master cidr_netmask=32"
+             ,
+             """<resources>
+                 <master id="R-master">
+@@ -960,7 +1073,7 @@ class FailOrWarn(ResourceTest):
+     def test_error_master_clone_combination(self):
+         self.assert_pcs_fail(
+             "resource create R ocf:heartbeat:Dummy --no-default-ops --clone"
+-                " --master"
++                " master"
+             ,
+             "Error: you can specify only one of clone, master, bundle or"
+                 " --group\n"
+@@ -968,7 +1081,7 @@ class FailOrWarn(ResourceTest):
+ 
+     def test_error_master_group_combination(self):
+         self.assert_pcs_fail(
+-            "resource create R ocf:heartbeat:Dummy --no-default-ops --master"
++            "resource create R ocf:heartbeat:Dummy --no-default-ops master"
+                 " --group G"
+             ,
+             "Error: you can specify only one of clone, master, bundle or"
+@@ -986,7 +1099,7 @@ class FailOrWarn(ResourceTest):
+ 
+     def test_error_bundle_master_combination(self):
+         self.assert_pcs_fail(
+-            "resource create R ocf:heartbeat:Dummy --no-default-ops --master"
++            "resource create R ocf:heartbeat:Dummy --no-default-ops master"
+                 " bundle bundle_id"
+             ,
+             "Error: you can specify only one of clone, master, bundle or"
+diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
+index 4160b01..152218e 100644
+--- a/pcs/test/test_constraints.py
++++ b/pcs/test/test_constraints.py
+@@ -341,43 +341,43 @@ Ticket Constraints:
+ 
+     def testColocationConstraints(self):
+         # see also BundleColocation
+-        line = "resource create M1 ocf:heartbeat:Dummy --master"
++        line = "resource create M1 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == ""
+ 
+-        line = "resource create M2 ocf:heartbeat:Dummy --master"
++        line = "resource create M2 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == ""
+ 
+-        line = "resource create M3 ocf:heartbeat:Dummy --master"
++        line = "resource create M3 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == "",[returnVal, output]
+ 
+-        line = "resource create M4 ocf:heartbeat:Dummy --master"
++        line = "resource create M4 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == "",[returnVal, output]
+ 
+-        line = "resource create M5 ocf:heartbeat:Dummy --master"
++        line = "resource create M5 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == "",[returnVal, output]
+ 
+-        line = "resource create M6 ocf:heartbeat:Dummy --master"
++        line = "resource create M6 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == "",[returnVal, output]
+ 
+-        line = "resource create M7 ocf:heartbeat:Dummy --master"
++        line = "resource create M7 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == "",[returnVal, output]
+ 
+-        line = "resource create M8 ocf:heartbeat:Dummy --master"
++        line = "resource create M8 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == "",[returnVal, output]
+ 
+-        line = "resource create M9 ocf:heartbeat:Dummy --master"
++        line = "resource create M9 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == "",[returnVal, output]
+ 
+-        line = "resource create M10 ocf:heartbeat:Dummy --master"
++        line = "resource create M10 ocf:heartbeat:Dummy master"
+         output, returnVal = pcs(temp_cib, line)
+         assert returnVal == 0 and output == ""
+ 
+@@ -929,7 +929,7 @@ Ticket Constraints:
+         assert returnVal == 1
+ 
+     def testLocationBadRules(self):
+-        o,r = pcs("resource create stateful0 ocf:heartbeat:Dummy --master")
++        o,r = pcs("resource create stateful0 ocf:heartbeat:Dummy master")
+         ac(o,"")
+         assert r == 0
+ 
+@@ -950,7 +950,7 @@ Ticket Constraints:
+ """)
+         assert r == 0
+ 
+-        o,r = pcs("resource create stateful1 ocf:heartbeat:Dummy --master")
++        o,r = pcs("resource create stateful1 ocf:heartbeat:Dummy master")
+         ac(o,"")
+         assert r == 0
+ 
+@@ -989,7 +989,7 @@ Ticket Constraints:
+         ac(o,"")
+         assert r == 0
+ 
+-        o,r = pcs("resource create stateful1 ocf:pacemaker:Stateful --master")
++        o,r = pcs("resource create stateful1 ocf:pacemaker:Stateful master")
+         ac(o, """\
+ Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
+ """)
+@@ -1110,7 +1110,7 @@ Ticket Constraints:
+         self.assertEqual(0, returnVal)
+ 
+         output, returnVal = pcs(
+-            "resource create stateful1 ocf:pacemaker:Stateful --master"
++            "resource create stateful1 ocf:pacemaker:Stateful master"
+         )
+         ac(output, """\
+ Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
+diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
+index c015fa4..5bc9517 100644
+--- a/pcs/test/test_resource.py
++++ b/pcs/test/test_resource.py
+@@ -2729,7 +2729,7 @@ Ticket Constraints:
+ 
+         output, returnVal  = pcs(
+             temp_cib,
+-            "resource create --no-default-ops D2 ocf:heartbeat:Dummy --master"
++            "resource create --no-default-ops D2 ocf:heartbeat:Dummy master"
+         )
+         assert returnVal == 0
+         assert output == "", [output]
+@@ -2797,7 +2797,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
+         ac(o,"")
+         assert r == 0
+ 
+-        o,r = pcs("resource create D3 ocf:heartbeat:Dummy --master")
++        o,r = pcs("resource create D3 ocf:heartbeat:Dummy master")
+         ac(o,"")
+         assert r == 0
+ 
+@@ -3011,7 +3011,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
+ 
+         output, returnVal = pcs(
+             temp_cib,
+-            "resource create --no-default-ops dummy ocf:heartbeat:Dummy --master"
++            "resource create --no-default-ops dummy ocf:heartbeat:Dummy master"
+         )
+         ac(output, "")
+         self.assertEqual(0, returnVal)
+@@ -3602,7 +3602,7 @@ Error: Cannot remove more than one resource from cloned group
+         # However those test the pcs library. I'm leaving these tests here to
+         # test the cli part for now.
+         self.assert_pcs_success(
+-            "resource create --no-default-ops dummy ocf:pacemaker:Stateful --master",
++            "resource create --no-default-ops dummy ocf:pacemaker:Stateful master",
+             "Warning: changing a monitor operation interval from 10 to 11 to make the operation unique\n"
+         )
+ 
+@@ -4630,7 +4630,7 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
+ 
+     def test_no_op_allowed_in_master_update(self):
+         self.assert_pcs_success(
+-            "resource create dummy ocf:heartbeat:Dummy --master"
++            "resource create dummy ocf:heartbeat:Dummy master"
+         )
+         self.assert_pcs_success("resource show dummy-master", outdent(
+             """\
+diff --git a/pcs/utils.py b/pcs/utils.py
+index d6aabf4..255fcf8 100644
+--- a/pcs/utils.py
++++ b/pcs/utils.py
+@@ -2890,6 +2890,13 @@ def get_modificators():
+         "start": "--start" in pcs_options,
+         "wait": pcs_options.get("--wait", False),
+         "watchdog": pcs_options.get("--watchdog", []),
++
++        #This is for `pcs resource create`. Fix of the bug
++        #https://bugzilla.redhat.com/show_bug.cgi?id=1378107
++        #caused problems in openstack which uses `pcs resource create`
++        #see https://bugzilla.redhat.com/show_bug.cgi?id=1458153
++        #so we give back the original misbehavior of master here temporarily.
++        "master": "--master" in pcs_options,
+     }
+ 
+ def exit_on_cmdline_input_errror(error, main_name, usage_name):
+-- 
+1.8.3.1
+
diff --git a/SOURCES/bz1459503-01-OSP-workarounds-not-compatible-wi.patch b/SOURCES/bz1459503-01-OSP-workarounds-not-compatible-wi.patch
new file mode 100644
index 0000000..133846c
--- /dev/null
+++ b/SOURCES/bz1459503-01-OSP-workarounds-not-compatible-wi.patch
@@ -0,0 +1,162 @@
+From 764f74bb363613c63c3757637267bf37ee2381a0 Mon Sep 17 00:00:00 2001
+From: Ivan Devat <idevat@redhat.com>
+Date: Wed, 7 Jun 2017 14:36:05 +0200
+Subject: [PATCH] squash bz1459503 OSP workarounds not compatible wi
+
+reuse existing pcmk authkey during setup
+
+show only warn if `resource create` creates remote
+---
+ pcs/cluster.py                       | 10 +++++++++-
+ pcs/lib/commands/resource.py         |  6 ++++--
+ pcs/test/cib_resource/test_create.py | 13 ++++++-------
+ pcs/test/test_resource.py            | 24 ++++++++++++------------
+ 4 files changed, 31 insertions(+), 22 deletions(-)
+
+diff --git a/pcs/cluster.py b/pcs/cluster.py
+index d896b0c..16eb0c5 100644
+--- a/pcs/cluster.py
++++ b/pcs/cluster.py
+@@ -452,13 +452,21 @@ def cluster_setup(argv):
+         print("Destroying cluster on nodes: {0}...".format(
+             ", ".join(primary_addr_list)
+         ))
++
++        try:
++            pcmk_authkey_content = (
++                open(settings.pacemaker_authkey_file, "rb").read()
++            )
++        except EnvironmentError as e:
++            pcmk_authkey_content = generate_key()
++
+         destroy_cluster(primary_addr_list)
+         print()
+ 
+         try:
+             file_definitions = {}
+             file_definitions.update(
+-                node_communication_format.pcmk_authkey_file(generate_key())
++                node_communication_format.pcmk_authkey_file(pcmk_authkey_content)
+             )
+             if modifiers["encryption"] == "1":
+                 file_definitions.update(
+diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
+index 0c5f682..0c4d6fc 100644
+--- a/pcs/lib/commands/resource.py
++++ b/pcs/lib/commands/resource.py
+@@ -69,7 +69,8 @@ def _validate_remote_connection(
+     report_list.append(
+         reports.get_problem_creator(
+             report_codes.FORCE_NOT_SUITABLE_COMMAND,
+-            allow_not_suitable_command
++            is_forced=True,
++            # allow_not_suitable_command
+         )(reports.use_command_node_add_remote)
+     )
+ 
+@@ -99,7 +100,8 @@ def _validate_guest_change(
+     report_list.append(
+         reports.get_problem_creator(
+             report_codes.FORCE_NOT_SUITABLE_COMMAND,
+-            allow_not_suitable_command
++            is_forced=True
++            # allow_not_suitable_command
+         )(create_report)
+     )
+ 
+diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
+index 045ce68..afd7d0a 100644
+--- a/pcs/test/cib_resource/test_create.py
++++ b/pcs/test/cib_resource/test_create.py
+@@ -1463,11 +1463,10 @@ class FailOrWarnGroup(ResourceTest):
+         )
+ 
+     def test_fail_when_on_pacemaker_remote_attempt(self):
+-        self.assert_pcs_fail(
++        self.assert_pcs_success(
+             "resource create R2 ocf:pacemaker:remote",
+-            "Error: this command is not sufficient for creating a remote"
+-                " connection, use 'pcs cluster node add-remote'"
+-                ", use --force to override\n"
++            "Warning: this command is not sufficient for creating a remote"
++                " connection, use 'pcs cluster node add-remote'\n"
+         )
+ 
+     def test_warn_when_on_pacemaker_remote_attempt(self):
+@@ -1567,10 +1566,10 @@ class FailOrWarnGroup(ResourceTest):
+         )
+ 
+     def test_fail_when_on_pacemaker_remote_guest_attempt(self):
+-        self.assert_pcs_fail(
++        self.assert_pcs_success(
+             "resource create R2 ocf:heartbeat:Dummy meta remote-node=HOST",
+-            "Error: this command is not sufficient for creating a guest node,"
+-            " use 'pcs cluster node add-guest', use --force to override\n"
++            "Warning: this command is not sufficient for creating a guest node,"
++            " use 'pcs cluster node add-guest'\n"
+         )
+ 
+     def test_warn_when_on_pacemaker_remote_guest_attempt(self):
+diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
+index 5bc9517..9ab1dd5 100644
+--- a/pcs/test/test_resource.py
++++ b/pcs/test/test_resource.py
+@@ -4973,10 +4973,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+         self.assert_pcs_success(
+             "resource create R ocf:heartbeat:Dummy",
+         )
+-        self.assert_pcs_fail(
++        self.assert_pcs_success(
+             "resource update R meta remote-node=HOST",
+-            "Error: this command is not sufficient for creating a guest node,"
+-            " use 'pcs cluster node add-guest', use --force to override\n"
++            "Warning: this command is not sufficient for creating a guest node,"
++            " use 'pcs cluster node add-guest'\n"
+         )
+     def test_update_warn_on_pacemaker_guest_attempt(self):
+         self.assert_pcs_success(
+@@ -4995,10 +4995,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+             "Warning: this command is not sufficient for creating a guest node,"
+             " use 'pcs cluster node add-guest'\n"
+         )
+-        self.assert_pcs_fail(
++        self.assert_pcs_success(
+             "resource update R meta remote-node=",
+-            "Error: this command is not sufficient for removing a guest node,"
+-            " use 'pcs cluster node remove-guest', use --force to override\n"
++            "Warning: this command is not sufficient for removing a guest node,"
++            " use 'pcs cluster node remove-guest'\n"
+         )
+ 
+     def test_update_warn_on_pacemaker_guest_attempt_remove(self):
+@@ -5019,10 +5019,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+         self.assert_pcs_success(
+             "resource create R ocf:heartbeat:Dummy",
+         )
+-        self.assert_pcs_fail(
++        self.assert_pcs_success(
+             "resource meta R remote-node=HOST",
+-            "Error: this command is not sufficient for creating a guest node,"
+-            " use 'pcs cluster node add-guest', use --force to override\n"
++            "Warning: this command is not sufficient for creating a guest node,"
++            " use 'pcs cluster node add-guest'\n"
+         )
+ 
+     def test_meta_warn_on_pacemaker_guest_attempt(self):
+@@ -5043,10 +5043,10 @@ class ResourceUpdateSpcialChecks(unittest.TestCase, AssertPcsMixin):
+             "Warning: this command is not sufficient for creating a guest node,"
+             " use 'pcs cluster node add-guest'\n"
+         )
+-        self.assert_pcs_fail(
++        self.assert_pcs_success(
+             "resource meta R remote-node=",
+-            "Error: this command is not sufficient for removing a guest node,"
+-            " use 'pcs cluster node remove-guest', use --force to override\n"
++            "Warning: this command is not sufficient for removing a guest node,"
++            " use 'pcs cluster node remove-guest'\n"
+         )
+ 
+     def test_meta_warn_on_pacemaker_guest_attempt_remove(self):
+-- 
+1.8.3.1
+
diff --git a/SOURCES/change-cman-to-rhel6-in-messages.patch b/SOURCES/change-cman-to-rhel6-in-messages.patch
index c7b0d88..3315132 100644
--- a/SOURCES/change-cman-to-rhel6-in-messages.patch
+++ b/SOURCES/change-cman-to-rhel6-in-messages.patch
@@ -1,41 +1,81 @@
-From 0c8d98bb420b5ea366de361758c6c01851f94630 Mon Sep 17 00:00:00 2001
+From 25b32eed71eb0d22330867f962c34ec5f515e3c9 Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Mon, 23 May 2016 17:00:13 +0200
 Subject: [PATCH] change cman to rhel6 in messages
 
 ---
- pcs/cluster.py          |  6 +++---
- pcs/lib/reports.py      |  8 ++++----
- pcs/pcs.8               | 10 +++++-----
- pcs/usage.py            | 20 ++++++++++----------
- pcsd/views/_dialogs.erb |  2 +-
- pcsd/views/manage.erb   | 16 ++++++++--------
- pcsd/views/nodes.erb    |  2 +-
- 7 files changed, 32 insertions(+), 32 deletions(-)
+ pcs/cli/common/console_report.py |  8 ++++----
+ pcs/cluster.py                   |  6 +++---
+ pcs/config.py                    |  2 +-
+ pcs/pcs.8                        | 10 +++++-----
+ pcs/quorum.py                    |  2 +-
+ pcs/test/test_cluster.py         | 26 +++++++++++++-------------
+ pcs/usage.py                     | 20 ++++++++++----------
+ pcsd/views/_dialogs.erb          |  2 +-
+ pcsd/views/manage.erb            | 16 ++++++++--------
+ pcsd/views/nodes.erb             |  2 +-
+ 10 files changed, 47 insertions(+), 47 deletions(-)
 
+diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
+index 793ff8d..57b2d64 100644
+--- a/pcs/cli/common/console_report.py
++++ b/pcs/cli/common/console_report.py
+@@ -583,7 +583,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+     ,
+ 
+     codes.CMAN_UNSUPPORTED_COMMAND:
+-        "This command is not supported on CMAN clusters"
++        "This command is not supported on RHEL 6 clusters"
+     ,
+ 
+     codes.ID_ALREADY_EXISTS: lambda info:
+@@ -813,7 +813,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+     ,
+ 
+     codes.IGNORED_CMAN_UNSUPPORTED_OPTION: lambda info:
+-        "{option_name} ignored as it is not supported on CMAN clusters"
++        "{option_name} ignored as it is not supported on RHEL 6 clusters"
+         .format(**info)
+     ,
+ 
+@@ -822,12 +822,12 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
+     ,
+ 
+     codes.CMAN_UDPU_RESTART_REQUIRED: (
+-        "Using udpu transport on a CMAN cluster,"
++        "Using udpu transport on a RHEL 6 cluster,"
+         " cluster restart is required after node add or remove"
+     ),
+ 
+     codes.CMAN_BROADCAST_ALL_RINGS: (
+-        "Enabling broadcast for all rings as CMAN does not support"
++        "Enabling broadcast for all rings as RHEL 6 does not support"
+         " broadcast in only one ring"
+     ),
+ 
 diff --git a/pcs/cluster.py b/pcs/cluster.py
-index e5ad1ec..4572643 100644
+index 16eb0c5..7720771 100644
 --- a/pcs/cluster.py
 +++ b/pcs/cluster.py
-@@ -1553,7 +1553,7 @@ def cluster_node(argv):
-         else:
-             utils.err("Unable to update any nodes")
-         if utils.is_cman_with_udpu_transport():
--            print("Warning: Using udpu transport on a CMAN cluster, "
-+            print("Warning: Using udpu transport on a RHEL 6 cluster, "
-                 + "cluster restart is required to apply node addition")
-         if wait:
-             print()
-@@ -1628,7 +1628,7 @@ def cluster_node(argv):
-         output, retval = utils.reloadCorosync()
-         output, retval = utils.run(["crm_node", "--force", "-R", node0])
-         if utils.is_cman_with_udpu_transport():
--            print("Warning: Using udpu transport on a CMAN cluster, "
-+            print("Warning: Using udpu transport on a RHEL 6 cluster, "
-                 + "cluster restart is required to apply node removal")
+@@ -1862,7 +1862,7 @@ def node_add(lib_env, node0, node1, modifiers):
+     else:
+         utils.err("Unable to update any nodes")
+     if utils.is_cman_with_udpu_transport():
+-        print("Warning: Using udpu transport on a CMAN cluster, "
++        print("Warning: Using udpu transport on a RHEL 6 cluster, "
+             + "cluster restart is required to apply node addition")
+     if wait:
+         print()
+@@ -1938,7 +1938,7 @@ def node_remove(lib_env, node0, modifiers):
+     output, retval = utils.reloadCorosync()
+     output, retval = utils.run(["crm_node", "--force", "-R", node0])
+     if utils.is_cman_with_udpu_transport():
+-        print("Warning: Using udpu transport on a CMAN cluster, "
++        print("Warning: Using udpu transport on a RHEL 6 cluster, "
+             + "cluster restart is required to apply node removal")
  
  def cluster_localnode(argv):
-@@ -1796,7 +1796,7 @@ def cluster_uidgid(argv, silent_list = False):
+@@ -2106,7 +2106,7 @@ def cluster_uidgid(argv, silent_list = False):
  
  def cluster_get_corosync_conf(argv):
      if utils.is_rhel6():
@@ -44,54 +84,27 @@ index e5ad1ec..4572643 100644
  
      if len(argv) > 1:
          usage.cluster()
-diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
-index cff491c..89888c5 100644
---- a/pcs/lib/reports.py
-+++ b/pcs/lib/reports.py
-@@ -893,7 +893,7 @@ def cman_unsupported_command():
-     """
-     return ReportItem.error(
-         report_codes.CMAN_UNSUPPORTED_COMMAND,
--        "This command is not supported on CMAN clusters"
-+        "This command is not supported on RHEL 6 clusters"
-     )
- 
- def id_already_exists(id):
-@@ -1138,7 +1138,7 @@ def cman_ignored_option(option):
-     """
-     return ReportItem.warning(
-         report_codes.IGNORED_CMAN_UNSUPPORTED_OPTION,
--        '{option_name} ignored as it is not supported on CMAN clusters',
-+        '{option_name} ignored as it is not supported on RHEL 6 clusters',
-         info={'option_name': option}
-     )
- 
-@@ -1159,7 +1159,7 @@ def cman_udpu_restart_required():
-     """
-     return ReportItem.warning(
-         report_codes.CMAN_UDPU_RESTART_REQUIRED,
--        "Using udpu transport on a CMAN cluster, "
-+        "Using udpu transport on a RHEL 6 cluster, "
-             + "cluster restart is required after node add or remove"
-     )
- 
-@@ -1169,7 +1169,7 @@ def cman_broadcast_all_rings():
-     """
-     return ReportItem.warning(
-         report_codes.CMAN_BROADCAST_ALL_RINGS,
--        "Enabling broadcast for all rings as CMAN does not support "
-+        "Enabling broadcast for all rings as RHEL 6 does not support "
-             + "broadcast in only one ring"
-     )
+diff --git a/pcs/config.py b/pcs/config.py
+index 5526eb5..389122b 100644
+--- a/pcs/config.py
++++ b/pcs/config.py
+@@ -614,7 +614,7 @@ def config_checkpoint_restore(argv):
  
+ def config_import_cman(argv):
+     if no_clufter:
+-        utils.err("Unable to perform a CMAN cluster conversion due to missing python-clufter package")
++        utils.err("Unable to perform a RHEL 6 cluster conversion due to missing python-clufter package")
+     # prepare convertor options
+     cluster_conf = settings.cluster_conf_file
+     dry_run_output = None
 diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 1efe8f4..dffaddd 100644
+index 27298a7..4acddb8 100644
 --- a/pcs/pcs.8
 +++ b/pcs/pcs.8
-@@ -197,13 +197,13 @@ auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\
- Authenticate pcs to pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root).  By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other).  Using \fB\-\-force\fR forces re-authentication to occur.
+@@ -206,13 +206,13 @@ auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\
+ Authenticate pcs to pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re\-authentication to occur.
  .TP
- setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [<node2[,node2-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>]
+ setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>] [\fB\-\-encryption\fR 0|1]
 -Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active' is not recommended). The \fB\-\-wait_for_all\fR, \fB\-\-auto_tie_breaker\fR, \fB\-\-last_man_standing\fR, \fB\-\-last_man_standing_window\fR options are all documented in corosync's votequorum(5) man page. These options are not supported on CMAN clusters.
 +Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for RHEL 6 clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active' is not recommended). The \fB\-\-wait_for_all\fR, \fB\-\-auto_tie_breaker\fR, \fB\-\-last_man_standing\fR, \fB\-\-last_man_standing_window\fR options are all documented in corosync's votequorum(5) man page. These options are not supported on RHEL 6 clusters.
  
@@ -105,24 +118,137 @@ index 1efe8f4..dffaddd 100644
  
  \fB\-\-join\fR <timeout> sets time in milliseconds to wait for join messages (default 50 ms)
  
-@@ -684,10 +684,10 @@ checkpoint restore <checkpoint_number>
+@@ -729,10 +729,10 @@ checkpoint restore <checkpoint_number>
  Restore cluster configuration to specified checkpoint.
  .TP
  import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>]
--Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format option.  Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf.
-+Converts RHEL 6 (CMAN) cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format option.  Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf.
+-Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output\-format option.  Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list\-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version if that matches output\-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf.
++Converts RHEL 6 (CMAN) cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output\-format option.  Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list\-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version if that matches output\-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf.
  .TP
- import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
+ import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs\-commands|pcs\-commands\-verbose [dist=<dist>]
 -Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed.  Commands will be saved to 'output' file.  For other options see above.
 +Converts RHEL 6 (CMAN) cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed.  Commands will be saved to 'output' file.  For other options see above.
  .TP
  export pcs\-commands|pcs\-commands\-verbose [output=<filename>] [dist=<dist>]
- Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file or written to stdout if 'output' is not specified.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.  Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version.
+ Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file or written to stdout if 'output' is not specified.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.  Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist values by running the "clufter \fB\-\-list\-dists\fR" command.  If 'dist' is not specified, it defaults to this node's version.
+diff --git a/pcs/quorum.py b/pcs/quorum.py
+index 937b057..8e8431c 100644
+--- a/pcs/quorum.py
++++ b/pcs/quorum.py
+@@ -196,7 +196,7 @@ def quorum_unblock_cmd(argv):
+         sys.exit(1)
+ 
+     if utils.is_rhel6():
+-        utils.err("operation is not supported on CMAN clusters")
++        utils.err("operation is not supported on RHEL 6 clusters")
+ 
+     output, retval = utils.run(
+         ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
+diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
+index 5c7a4a1..a3836be 100644
+--- a/pcs/test/test_cluster.py
++++ b/pcs/test/test_cluster.py
+@@ -1228,7 +1228,7 @@ logging {
+             .format(cluster_conf_tmp)
+         )
+         ac(output, """\
+-Warning: Using udpu transport on a CMAN cluster, cluster restart is required after node add or remove
++Warning: Using udpu transport on a RHEL 6 cluster, cluster restart is required after node add or remove
+ """)
+         self.assertEqual(returnVal, 0)
+         with open(cluster_conf_tmp) as f:
+@@ -1320,7 +1320,7 @@ logging {
+             .format(cluster_conf_tmp)
+         )
+         ac(output, """\
+-Warning: --ipv6 ignored as it is not supported on CMAN clusters
++Warning: --ipv6 ignored as it is not supported on RHEL 6 clusters
+ """)
+         self.assertEqual(returnVal, 0)
+         with open(cluster_conf_tmp) as f:
+@@ -1914,7 +1914,7 @@ logging {
+         )
+         ac(output, """\
+ Error: 'blah' is not a valid RRP mode value, use active, passive, use --force to override
+-Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
++Warning: Enabling broadcast for all rings as RHEL 6 does not support broadcast in only one ring
+ """)
+         self.assertEqual(returnVal, 1)
+ 
+@@ -2193,7 +2193,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+         )
+         ac(output, """\
+ Error: using a RRP mode of 'active' is not supported or tested, use --force to override
+-Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
++Warning: Enabling broadcast for all rings as RHEL 6 does not support broadcast in only one ring
+ """)
+         self.assertEqual(returnVal, 1)
+ 
+@@ -2203,7 +2203,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+             .format(cluster_conf_tmp)
+         )
+         ac(output, """\
+-Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
++Warning: Enabling broadcast for all rings as RHEL 6 does not support broadcast in only one ring
+ Warning: using a RRP mode of 'active' is not supported or tested
+ """)
+         self.assertEqual(returnVal, 0)
+@@ -2272,7 +2272,7 @@ Error: if one node is configured for RRP, all nodes must be configured for RRP
+         )
+         ac(output, """\
+ Error: --addr0 and --addr1 can only be used with --transport=udp
+-Warning: Using udpu transport on a CMAN cluster, cluster restart is required after node add or remove
++Warning: Using udpu transport on a RHEL 6 cluster, cluster restart is required after node add or remove
+ """)
+         self.assertEqual(returnVal, 1)
+ 
+@@ -2362,7 +2362,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
+             .format(cluster_conf_tmp)
+         )
+         ac(output, """\
+-Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
++Warning: Enabling broadcast for all rings as RHEL 6 does not support broadcast in only one ring
+ """)
+         self.assertEqual(returnVal, 0)
+         with open(cluster_conf_tmp) as f:
+@@ -2377,7 +2377,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+             .format(cluster_conf_tmp)
+         )
+         ac(output, """\
+-Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
++Warning: Enabling broadcast for all rings as RHEL 6 does not support broadcast in only one ring
+ """)
+         self.assertEqual(returnVal, 0)
+         with open(cluster_conf_tmp) as f:
+@@ -2395,10 +2395,10 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
+             .format(cluster_conf_tmp)
+         )
+         ac(output, """\
+-Warning: --wait_for_all ignored as it is not supported on CMAN clusters
+-Warning: --auto_tie_breaker ignored as it is not supported on CMAN clusters
+-Warning: --last_man_standing ignored as it is not supported on CMAN clusters
+-Warning: --last_man_standing_window ignored as it is not supported on CMAN clusters
++Warning: --wait_for_all ignored as it is not supported on RHEL 6 clusters
++Warning: --auto_tie_breaker ignored as it is not supported on RHEL 6 clusters
++Warning: --last_man_standing ignored as it is not supported on RHEL 6 clusters
++Warning: --last_man_standing_window ignored as it is not supported on RHEL 6 clusters
+ """)
+         self.assertEqual(returnVal, 0)
+         with open(cluster_conf_tmp) as f:
+@@ -2494,7 +2494,7 @@ logging {
+             .format(cluster_conf_tmp)
+         )
+         ac(output, """\
+-Warning: --token_coefficient ignored as it is not supported on CMAN clusters
++Warning: --token_coefficient ignored as it is not supported on RHEL 6 clusters
+ """)
+         self.assertEqual(returnVal, 0)
+         with open(cluster_conf_tmp) as f:
 diff --git a/pcs/usage.py b/pcs/usage.py
-index ea407c3..0ebebe0 100644
+index 9cbf7de..4a9f2f2 100644
 --- a/pcs/usage.py
 +++ b/pcs/usage.py
-@@ -553,22 +553,22 @@ Commands:
+@@ -585,23 +585,23 @@ Commands:
          --wait will wait up to 'n' seconds for the nodes to start,
          --enable will enable corosync and pacemaker on node startup,
          --transport allows specification of corosync transport (default: udpu;
@@ -141,15 +267,16 @@ index ea407c3..0ebebe0 100644
 +            option is not supported on RHEL 6 clusters.
          --token <timeout> sets time in milliseconds until a token loss is
              declared after not receiving a token (default 1000 ms)
-         --token_coefficient <timeout> sets time in milliseconds used for clusters
-             with at least 3 nodes as a coefficient for real token timeout calculation
+         --token_coefficient <timeout> sets time in milliseconds used for
+             clusters with at least 3 nodes as a coefficient for real token
+             timeout calculation
              (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms)
 -            This option is not supported on CMAN clusters.
 +            This option is not supported on RHEL 6 clusters.
          --join <timeout> sets time in milliseconds to wait for join messages
              (default 50 ms)
          --consensus <timeout> sets time in milliseconds to wait for consensus
-@@ -1187,9 +1187,9 @@ Commands:
+@@ -1325,9 +1325,9 @@ Commands:
  
      import-cman output=<filename> [input=<filename>] [--interactive]
              [output-format=corosync.conf|cluster.conf] [dist=<dist>]
@@ -162,7 +289,7 @@ index ea407c3..0ebebe0 100644
          command can be used.  If --interactive is specified you will be
          prompted to solve incompatibilities manually.  If no input is specified
          /etc/cluster/cluster.conf will be used.  You can force to create output
-@@ -1203,9 +1203,9 @@ Commands:
+@@ -1341,9 +1341,9 @@ Commands:
  
      import-cman output=<filename> [input=<filename>] [--interactive]
              output-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
@@ -189,7 +316,7 @@ index d18ac71..21be443 100644
  </div>
  
 diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
-index 39ab41f..cacd6cb 100644
+index 2b12aaa..4129de5 100644
 --- a/pcsd/views/manage.erb
 +++ b/pcsd/views/manage.erb
 @@ -213,9 +213,9 @@
@@ -204,7 +331,7 @@ index 39ab41f..cacd6cb 100644
      </table>
      <span onclick='$(".advanced_open").toggle();$("#advanced_cluster_create_options").toggle();'><span class="advanced_open rightarrow sprites"></span><span class="advanced_open downarrow sprites" style="display:none;"></span>Advanced Options:</span>
      <div id="advanced_cluster_create_options" style="display:none;">
-@@ -245,7 +245,7 @@ remaining 3 nodes will be fenced.
+@@ -248,7 +248,7 @@ remaining 3 nodes will be fenced.
  
  It is very useful when combined with Last Man Standing.
  
@@ -213,7 +340,7 @@ index 39ab41f..cacd6cb 100644
  	<% auto_tie_desc = "\
  Enables Auto Tie Breaker (ATB) feature (default: off).
  
-@@ -258,7 +258,7 @@ partition, or the set of nodes that are still in contact with the \
+@@ -261,7 +261,7 @@ partition, or the set of nodes that are still in contact with the \
  node that has the lowest nodeid will remain quorate. The other nodes \
  will be inquorate.
  
@@ -222,7 +349,7 @@ index 39ab41f..cacd6cb 100644
  	<% last_man_desc = "\
  Enables Last Man Standing (LMS) feature (default: off).
  
-@@ -279,18 +279,18 @@ Using the above 8 node cluster example, with LMS enabled the cluster \
+@@ -282,18 +282,18 @@ Using the above 8 node cluster example, with LMS enabled the cluster \
  can retain quorum and continue operating by losing, in a cascade \
  fashion, up to 6 nodes with only 2 remaining active.
  
@@ -244,7 +371,7 @@ index 39ab41f..cacd6cb 100644
          <% token_timeout = "\
  Sets time in milliseconds until a token loss is declared after not receiving \
  a token (default: 1000 ms)" %>
-@@ -299,7 +299,7 @@ Sets time in milliseconds used for clusters with at least 3 nodes \
+@@ -302,7 +302,7 @@ Sets time in milliseconds used for clusters with at least 3 nodes \
  as a coefficient for real token timeout calculation \
  (token + (number_of_nodes - 2) * token_coefficient) (default: 650 ms)
  
@@ -254,10 +381,10 @@ index 39ab41f..cacd6cb 100644
  Sets time in milliseconds to wait for join messages (default: 50 ms)" %>
          <% consensus_timeout = "\
 diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb
-index 8fccd25..517e19d 100644
+index 3c3aeed..4b03f06 100644
 --- a/pcsd/views/nodes.erb
 +++ b/pcsd/views/nodes.erb
-@@ -350,7 +350,7 @@
+@@ -359,7 +359,7 @@
        {{/if}}
        {{#if Pcs.is_cman_with_udpu_transport}}
        <tr>
diff --git a/SOURCES/fix-pcs-constraint-ticket-set-help.patch b/SOURCES/fix-pcs-constraint-ticket-set-help.patch
deleted file mode 100644
index 7d73ab0..0000000
--- a/SOURCES/fix-pcs-constraint-ticket-set-help.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From 10ed9eaf5f9485b0186fdc1546e22bb321d47e85 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Thu, 23 Jun 2016 13:31:29 +0200
-Subject: [PATCH] fix "pcs constraint ticket set" help
-
----
- pcs/pcs.8    | 2 +-
- pcs/usage.py | 4 ++--
- 2 files changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/pcs/pcs.8 b/pcs/pcs.8
-index 38a4913..0e230b7 100644
---- a/pcs/pcs.8
-+++ b/pcs/pcs.8
-@@ -482,7 +482,7 @@ ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id]
- Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
- .TP
- ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
--Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket. Optional constraint option is loss-policy=fence/stop/freeze/demote.
-+Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
- .TP
- remove [constraint id]...
- Remove constraint(s) or constraint rules with the specified id(s).
-diff --git a/pcs/usage.py b/pcs/usage.py
-index 9d24b78..c4c417a 100644
---- a/pcs/usage.py
-+++ b/pcs/usage.py
-@@ -998,8 +998,8 @@ Commands:
-         Create a ticket constraint with a resource set.
-         Available options are sequential=true/false, require-all=true/false,
-         action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
--        Required constraint option is ticket.
--        Optional constraint option is loss-policy=fence/stop/freeze/demote.
-+        Required constraint option is ticket=<ticket>. Optional constraint
-+        options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
- 
-     remove [constraint id]...
-         Remove constraint(s) or constraint rules with the specified id(s).
--- 
-1.8.3.1
-
diff --git a/SOURCES/fix-qdevice-tests-failing-due-to-multithreading.patch b/SOURCES/fix-qdevice-tests-failing-due-to-multithreading.patch
deleted file mode 100644
index a0a7a6a..0000000
--- a/SOURCES/fix-qdevice-tests-failing-due-to-multithreading.patch
+++ /dev/null
@@ -1,151 +0,0 @@
-From c178935290a5387fdfbd54330769ee159d8916b6 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Fri, 1 Jul 2016 10:39:48 +0200
-Subject: [PATCH] test: fix qdevice tests failing due to multithreading
-
----
- pcs/test/test_lib_commands_quorum.py | 37 ++++++++++++++++++++++--------------
- 1 file changed, 23 insertions(+), 14 deletions(-)
-
-diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
-index c12ab66..826251a 100644
---- a/pcs/test/test_lib_commands_quorum.py
-+++ b/pcs/test/test_lib_commands_quorum.py
-@@ -763,7 +763,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-         mock_get_cert_request.assert_called_once_with(
-             "mock_runner",
-             self.cluster_name
-@@ -786,7 +786,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_import_calls),
-             len(mock_import_cert.mock_calls)
-         )
--        mock_import_cert.assert_has_calls(client_import_calls)
-+        mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
- 
-     def test_error_get_ca_cert(
-         self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-@@ -894,7 +894,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
- 
-     def test_error_client_setup_skip_offline(
-         self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-@@ -959,7 +959,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
- 
-     def test_generate_cert_request_error(
-         self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-@@ -1004,7 +1004,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-         mock_get_cert_request.assert_called_once_with(
-             "mock_runner",
-             self.cluster_name
-@@ -1062,7 +1062,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-         mock_get_cert_request.assert_called_once_with(
-             "mock_runner",
-             self.cluster_name
-@@ -1119,7 +1119,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-         mock_get_cert_request.assert_called_once_with(
-             "mock_runner",
-             self.cluster_name
-@@ -1201,7 +1201,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-         mock_get_cert_request.assert_called_once_with(
-             "mock_runner",
-             self.cluster_name
-@@ -1224,7 +1224,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_import_calls),
-             len(mock_import_cert.mock_calls)
-         )
--        mock_import_cert.assert_has_calls(client_import_calls)
-+        mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
- 
-     def test_client_import_cert_error_skip_offline(
-         self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-@@ -1282,7 +1282,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_setup_calls),
-             len(mock_client_setup.mock_calls)
-         )
--        mock_client_setup.assert_has_calls(client_setup_calls)
-+        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-         mock_get_cert_request.assert_called_once_with(
-             "mock_runner",
-             self.cluster_name
-@@ -1305,7 +1305,7 @@ class AddDeviceNetTest(TestCase):
-             len(client_import_calls),
-             len(mock_import_cert.mock_calls)
-         )
--        mock_import_cert.assert_has_calls(client_import_calls)
-+        mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
- 
- 
- @mock.patch.object(LibraryEnvironment, "push_corosync_conf")
-@@ -1507,7 +1507,10 @@ class RemoveDeviceNetTest(TestCase):
-             len(client_destroy_calls),
-             len(mock_client_destroy.mock_calls)
-         )
--        mock_client_destroy.assert_has_calls(client_destroy_calls)
-+        mock_client_destroy.assert_has_calls(
-+            client_destroy_calls,
-+            any_order=True
-+        )
- 
-     def test_error_client_destroy(self, mock_client_destroy):
-         def raiser(communicator, node):
-@@ -1561,7 +1564,10 @@ class RemoveDeviceNetTest(TestCase):
-             len(client_destroy_calls),
-             len(mock_client_destroy.mock_calls)
-         )
--        mock_client_destroy.assert_has_calls(client_destroy_calls)
-+        mock_client_destroy.assert_has_calls(
-+            client_destroy_calls,
-+            any_order=True
-+        )
- 
-     def test_error_client_destroy_skip_offline(self, mock_client_destroy):
-         def raiser(communicator, node):
-@@ -1606,7 +1612,10 @@ class RemoveDeviceNetTest(TestCase):
-             len(client_destroy_calls),
-             len(mock_client_destroy.mock_calls)
-         )
--        mock_client_destroy.assert_has_calls(client_destroy_calls)
-+        mock_client_destroy.assert_has_calls(
-+            client_destroy_calls,
-+            any_order=True
-+        )
- 
- 
- @mock.patch.object(LibraryEnvironment, "push_corosync_conf")
--- 
-1.8.3.1
-
diff --git a/SOURCES/pcsd-fix-syntax-error-on-ruby-1.8.patch b/SOURCES/pcsd-fix-syntax-error-on-ruby-1.8.patch
deleted file mode 100644
index da88ed6..0000000
--- a/SOURCES/pcsd-fix-syntax-error-on-ruby-1.8.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From 96885f66dde45fd8edf2b916ce63bbc236cfe28a Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Mon, 18 Jul 2016 15:03:42 +0200
-Subject: [PATCH] pcsd: fix syntax error on ruby 1.8
-
----
- pcsd/remote.rb | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/pcsd/remote.rb b/pcsd/remote.rb
-index 6a3a692..25fb74d 100644
---- a/pcsd/remote.rb
-+++ b/pcsd/remote.rb
-@@ -1558,7 +1558,7 @@ def remove_resource(params, request, auth_user)
-       end
-       cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable']
-       resource_list.each { |resource|
--        _, err, retval = run_cmd(user, *cmd, resource)
-+        _, err, retval = run_cmd(user, *(cmd + [resource]))
-         if retval != 0
-           unless (
-             err.join('').index('unable to find a resource') != -1 and
--- 
-1.8.3.1
-
diff --git a/SOURCES/rhel7.patch b/SOURCES/rhel7.patch
index 5f73344..706a433 100644
--- a/SOURCES/rhel7.patch
+++ b/SOURCES/rhel7.patch
@@ -1,4 +1,4 @@
-From e27fb389233d1f66dcda32fa7d06192a82f5944f Mon Sep 17 00:00:00 2001
+From cff71cf607ae7d0b42f61fbc0159e2bccd559ee8 Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Tue, 24 May 2016 07:26:15 +0200
 Subject: [PATCH] adapt working with gems to rhel 7
@@ -10,7 +10,7 @@ Subject: [PATCH] adapt working with gems to rhel 7
  3 files changed, 2 insertions(+), 13 deletions(-)
 
 diff --git a/pcsd/Gemfile b/pcsd/Gemfile
-index e851eaf..6526f53 100644
+index e01b31c..02134fd 100644
 --- a/pcsd/Gemfile
 +++ b/pcsd/Gemfile
 @@ -1,9 +1,5 @@
@@ -23,8 +23,8 @@ index e851eaf..6526f53 100644
  gem 'sinatra'
  gem 'sinatra-contrib'
  gem 'rack'
-@@ -12,8 +8,7 @@ gem 'tilt'
- gem 'eventmachine'
+@@ -11,8 +7,7 @@ gem 'rack-protection'
+ gem 'tilt'
  gem 'rack-test'
  gem 'backports'
 -gem 'rpam-ruby19', :platform => [:ruby_19, :ruby_20, :ruby_21, :ruby_22]
@@ -34,40 +34,44 @@ index e851eaf..6526f53 100644
  gem 'open4'
  gem 'orderedhash'
 diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
-index eff055a..15ee9b3 100644
+index a3fab96..36853bd 100644
 --- a/pcsd/Gemfile.lock
 +++ b/pcsd/Gemfile.lock
-@@ -1,10 +1,8 @@
+@@ -1,11 +1,9 @@
  GEM
    remote: https://rubygems.org/
 -  remote: https://tojeline.fedorapeople.org/rubygems/
    specs:
      backports (3.6.8)
-     eventmachine (1.2.0.1)
--    json (1.8.3)
-     multi_json (1.12.0)
+     ethon (0.10.1)
+     ffi (1.9.17)
+-    json (2.0.3)
+     multi_json (1.12.1)
      open4 (1.3.4)
      orderedhash (0.0.6)
-@@ -33,7 +31,6 @@ PLATFORMS
- DEPENDENCIES
+@@ -35,7 +33,6 @@ DEPENDENCIES
    backports
-   eventmachine
+   ethon
+   ffi
 -  json
    multi_json
    open4
    orderedhash
 diff --git a/pcsd/Makefile b/pcsd/Makefile
-index 798a8bd..08d9cf1 100644
+index 2ecd4de..10719e1 100644
 --- a/pcsd/Makefile
 +++ b/pcsd/Makefile
-@@ -1,4 +1,4 @@
+@@ -1,7 +1,7 @@
+ FFI_VERSION="1.9.17"
+ FFI_C_DIR=vendor/bundle/ruby/gems/ffi-${FFI_VERSION}/ext/ffi_c
+ 
 -build_gems: get_gems
 +build_gems:
  	bundle install --local --deployment
- 
- # RHEL6 needs special rpam-ruby19 gem to work with 1.8.7
-@@ -21,8 +21,5 @@ build_gems_rhel6:
- 	vendor/cache/sinatra-contrib-1.4.7.gem \
+ 	#ffi makes symlink with absolute path. Let's change it to relative path.
+ 	for fname in `ls ${FFI_C_DIR}/libffi-*/include/ffitarget.h`; do \
+@@ -33,8 +33,5 @@ build_gems_rhel6:
+ 	vendor/cache/tilt-2.0.6.gem \
  	-- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"'
  
 -get_gems:
diff --git a/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch b/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch
index 59e9748..f31bd23 100644
--- a/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch
+++ b/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch
@@ -1,21 +1,21 @@
-From 7e4c0a0ea1cadf2c887994afa1e0f728ce64c1aa Mon Sep 17 00:00:00 2001
+From 07ea4bec19958563e82bd8e863f64ba4f36850c0 Mon Sep 17 00:00:00 2001
 From: Ivan Devat <idevat@redhat.com>
 Date: Mon, 29 Aug 2016 18:16:41 +0200
 Subject: [PATCH] show only warning when crm_mon xml is invalid
 
 ---
- pcs/lib/pacemaker_state.py           | 13 ++++++++++---
- pcs/test/test_lib_pacemaker_state.py | 24 ++++++++++++++++++++----
+ pcs/lib/pacemaker/state.py           | 13 ++++++++++---
+ pcs/lib/pacemaker/test/test_state.py | 24 ++++++++++++++++++++----
  2 files changed, 30 insertions(+), 7 deletions(-)
 
-diff --git a/pcs/lib/pacemaker_state.py b/pcs/lib/pacemaker_state.py
-index b413b90..e300da7 100644
---- a/pcs/lib/pacemaker_state.py
-+++ b/pcs/lib/pacemaker_state.py
-@@ -136,10 +136,17 @@ class _NodeSection(_Element):
- def _get_valid_cluster_state_dom(xml):
+diff --git a/pcs/lib/pacemaker/state.py b/pcs/lib/pacemaker/state.py
+index be3e7ad..cab72c8 100644
+--- a/pcs/lib/pacemaker/state.py
++++ b/pcs/lib/pacemaker/state.py
+@@ -145,10 +145,17 @@ class _NodeSection(_Element):
+ def get_cluster_state_dom(xml):
      try:
-         dom = etree.fromstring(xml)
+         dom = xml_fromstring(xml)
 -        if os.path.isfile(settings.crm_mon_schema):
 -            etree.RelaxNG(file=settings.crm_mon_schema).assertValid(dom)
 +        if(
@@ -33,10 +33,10 @@ index b413b90..e300da7 100644
          raise LibraryError(reports.cluster_state_invalid_format())
  
  class ClusterState(_Element):
-diff --git a/pcs/test/test_lib_pacemaker_state.py b/pcs/test/test_lib_pacemaker_state.py
-index 13f6eb0..83b30a3 100644
---- a/pcs/test/test_lib_pacemaker_state.py
-+++ b/pcs/test/test_lib_pacemaker_state.py
+diff --git a/pcs/lib/pacemaker/test/test_state.py b/pcs/lib/pacemaker/test/test_state.py
+index 5de9426..70bd886 100644
+--- a/pcs/lib/pacemaker/test/test_state.py
++++ b/pcs/lib/pacemaker/test/test_state.py
 @@ -5,6 +5,14 @@ from __future__ import (
      unicode_literals,
  )
@@ -49,10 +49,10 @@ index 13f6eb0..83b30a3 100644
 +    from io import StringIO
 +
 +
- from pcs.test.tools.pcs_unittest import TestCase
+ from pcs.test.tools.pcs_unittest import TestCase, mock
  from lxml import etree
  
-@@ -84,16 +92,24 @@ class ClusterStatusTest(TestBase):
+@@ -87,16 +95,24 @@ class ClusterStatusTest(TestBase):
          )
  
      def test_refuse_invalid_document(self):
@@ -61,11 +61,12 @@ index 13f6eb0..83b30a3 100644
 +        tmp_stdout = sys.stdout
 +        stdout_catpture = StringIO()
 +        sys.stdout = stdout_catpture
++
          self.covered_status.append_to_first_tag_name(
              'nodes',
              '<node without="required attributes" />'
          )
--
+ 
 -        assert_raise_library_error(
 -            lambda: ClusterState(str(self.covered_status)),
 -            (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {})
@@ -75,9 +76,8 @@ index 13f6eb0..83b30a3 100644
 +            "Warning: xml with cluster status does not conform to the crm_mon"
 +                " schema\n"
          )
- 
+-
 +        sys.stdout = tmp_stdout
-+
  
  class WorkWithClusterStatusNodesTest(TestBase):
      def fixture_node_string(self, **kwargs):
diff --git a/SOURCES/test-corrections.patch b/SOURCES/test-corrections.patch
deleted file mode 100644
index ff3d123..0000000
--- a/SOURCES/test-corrections.patch
+++ /dev/null
@@ -1,1312 +0,0 @@
-From 60a297aa6a1e2d31619da281d843235edcaa43bb Mon Sep 17 00:00:00 2001
-From: Ivan Devat <idevat@redhat.com>
-Date: Wed, 14 Sep 2016 09:04:57 +0200
-Subject: [PATCH] squash test corrections
-
-test: import correct unittest version
-
-test: merge modules importing unittest with version care
-
-test: factor out recurring fixture in tests
----
- pcs/cli/booth/test/test_command.py                    |  4 ++--
- pcs/cli/booth/test/test_env.py                        |  4 ++--
- pcs/cli/common/test/test_completion.py                |  2 +-
- pcs/cli/common/test/test_console_report.py            |  2 +-
- pcs/cli/common/test/test_lib_wrapper.py               |  4 ++--
- pcs/cli/common/test/test_middleware.py                |  2 +-
- pcs/cli/common/test/test_parse_args.py                |  2 +-
- pcs/cli/constraint/test/test_command.py               |  4 ++--
- pcs/cli/constraint/test/test_console_report.py        |  2 +-
- pcs/cli/constraint/test/test_parse_args.py            |  9 ++-------
- pcs/cli/constraint_all/test/test_console_report.py    |  4 ++--
- pcs/cli/constraint_ticket/test/test_command.py        |  4 ++--
- pcs/cli/constraint_ticket/test/test_console_report.py |  2 +-
- pcs/cli/constraint_ticket/test/test_parse_args.py     |  2 +-
- pcs/lib/booth/test/test_config_exchange.py            |  2 +-
- pcs/lib/booth/test/test_config_files.py               | 10 ++++------
- pcs/lib/booth/test/test_config_structure.py           |  4 ++--
- pcs/lib/booth/test/test_env.py                        | 11 ++++-------
- pcs/lib/booth/test/test_resource.py                   |  4 ++--
- pcs/lib/booth/test/test_status.py                     |  4 ++--
- pcs/lib/booth/test/test_sync.py                       |  4 ++--
- pcs/lib/cib/test/test_alert.py                        |  4 ++--
- pcs/lib/cib/test/test_constraint.py                   |  4 ++--
- pcs/lib/cib/test/test_constraint_colocation.py        |  4 ++--
- pcs/lib/cib/test/test_constraint_order.py             |  4 ++--
- pcs/lib/cib/test/test_constraint_ticket.py            |  4 ++--
- pcs/lib/cib/test/test_nvpair.py                       |  2 +-
- pcs/lib/cib/test/test_resource.py                     |  2 +-
- pcs/lib/cib/test/test_resource_set.py                 |  4 ++--
- pcs/lib/commands/test/test_alert.py                   |  4 ++--
- pcs/lib/commands/test/test_booth.py                   | 10 ++++------
- pcs/lib/commands/test/test_constraint_common.py       |  4 ++--
- pcs/lib/commands/test/test_ticket.py                  |  2 +-
- pcs/lib/test/misc.py                                  |  2 +-
- pcs/lib/test/test_env_file.py                         |  4 ++--
- pcs/lib/test/test_errors.py                           |  2 +-
- pcs/lib/test/test_pacemaker_values.py                 |  2 +-
- pcs/test/test_acl.py                                  |  2 +-
- pcs/test/test_cluster.py                              |  2 +-
- pcs/test/test_common_tools.py                         |  2 +-
- pcs/test/test_constraints.py                          |  2 +-
- pcs/test/test_lib_cib_acl.py                          |  2 +-
- pcs/test/test_lib_cib_tools.py                        |  4 ++--
- pcs/test/test_lib_commands_qdevice.py                 |  4 ++--
- pcs/test/test_lib_commands_quorum.py                  |  4 ++--
- pcs/test/test_lib_commands_sbd.py                     |  4 ++--
- pcs/test/test_lib_corosync_config_facade.py           |  2 +-
- pcs/test/test_lib_corosync_config_parser.py           |  2 +-
- pcs/test/test_lib_corosync_live.py                    |  4 ++--
- pcs/test/test_lib_corosync_qdevice_client.py          |  4 ++--
- pcs/test/test_lib_corosync_qdevice_net.py             |  4 ++--
- pcs/test/test_lib_env.py                              |  4 ++--
- pcs/test/test_lib_external.py                         |  4 ++--
- pcs/test/test_lib_node.py                             |  2 +-
- pcs/test/test_lib_nodes_task.py                       |  4 ++--
- pcs/test/test_lib_pacemaker.py                        |  4 ++--
- pcs/test/test_lib_pacemaker_state.py                  |  2 +-
- pcs/test/test_lib_resource_agent.py                   |  4 ++--
- pcs/test/test_lib_sbd.py                              |  4 ++--
- pcs/test/test_lib_tools.py                            |  2 +-
- pcs/test/test_node.py                                 |  2 +-
- pcs/test/test_properties.py                           |  2 +-
- pcs/test/test_quorum.py                               |  2 +-
- pcs/test/test_resource.py                             |  2 +-
- pcs/test/test_rule.py                                 |  2 +-
- pcs/test/test_stonith.py                              |  2 +-
- pcs/test/test_utils.py                                |  2 +-
- pcs/test/tools/misc.py                                | 14 ++++++++++++++
- pcs/test/tools/pcs_mock.py                            | 13 -------------
- pcs/test/tools/pcs_unittest.py                        | 19 +++++++++++++++++++
- 70 files changed, 142 insertions(+), 134 deletions(-)
- delete mode 100644 pcs/test/tools/pcs_mock.py
-
-diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py
-index 019a74f..44d7a12 100644
---- a/pcs/cli/booth/test/test_command.py
-+++ b/pcs/cli/booth/test/test_command.py
-@@ -5,10 +5,10 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.cli.booth import command
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- class ConfigSetupTest(TestCase):
-diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py
-index 1ead6f2..b1d80aa 100644
---- a/pcs/cli/booth/test/test_env.py
-+++ b/pcs/cli/booth/test/test_env.py
-@@ -5,12 +5,12 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.cli.booth.env import middleware_config
- from pcs.common import report_codes, env_file_role_codes
- from pcs.lib.errors import LibraryEnvError, ReportItem
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- class BoothConfTest(TestCase):
-diff --git a/pcs/cli/common/test/test_completion.py b/pcs/cli/common/test/test_completion.py
-index 865da2c..daec1bc 100644
---- a/pcs/cli/common/test/test_completion.py
-+++ b/pcs/cli/common/test/test_completion.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.cli.common.completion import (
-     _find_suggestions,
-diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
-index 23cf8e9..63fe55c 100644
---- a/pcs/cli/common/test/test_console_report.py
-+++ b/pcs/cli/common/test/test_console_report.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from pcs.cli.common.console_report import indent
- 
- class IndentTest(TestCase):
-diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py
-index c10bb62..149e612 100644
---- a/pcs/cli/common/test/test_lib_wrapper.py
-+++ b/pcs/cli/common/test/test_lib_wrapper.py
-@@ -4,10 +4,10 @@ from __future__ import (
-     print_function,
-     unicode_literals,
- )
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.cli.common.lib_wrapper import Library, bind
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.lib.errors import ReportItem
- from pcs.lib.errors import LibraryEnvError
- 
-diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py
-index c030cd9..7eefbca 100644
---- a/pcs/cli/common/test/test_middleware.py
-+++ b/pcs/cli/common/test/test_middleware.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.cli.common import middleware
- 
-diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py
-index eb358a5..23704b9 100644
---- a/pcs/cli/common/test/test_parse_args.py
-+++ b/pcs/cli/common/test/test_parse_args.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from pcs.cli.common.parse_args import(
-     split_list,
-     prepare_options,
-diff --git a/pcs/cli/constraint/test/test_command.py b/pcs/cli/constraint/test/test_command.py
-index 5b493cd..6a79e00 100644
---- a/pcs/cli/constraint/test/test_command.py
-+++ b/pcs/cli/constraint/test/test_command.py
-@@ -5,10 +5,10 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from pcs.cli.constraint import command
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- def fixture_constraint():
-     return {
-diff --git a/pcs/cli/constraint/test/test_console_report.py b/pcs/cli/constraint/test/test_console_report.py
-index b20bc80..084124c 100644
---- a/pcs/cli/constraint/test/test_console_report.py
-+++ b/pcs/cli/constraint/test/test_console_report.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from pcs.cli.constraint import console_report
- 
- class OptionsTest(TestCase):
-diff --git a/pcs/cli/constraint/test/test_parse_args.py b/pcs/cli/constraint/test/test_parse_args.py
-index 7673023..484cb8d 100644
---- a/pcs/cli/constraint/test/test_parse_args.py
-+++ b/pcs/cli/constraint/test/test_parse_args.py
-@@ -5,16 +5,11 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.cli.common.errors import CmdLineInputError
- from pcs.cli.constraint.parse_args import prepare_set_args, prepare_resource_sets
--
--
--try:
--    import unittest.mock as mock
--except ImportError:
--    import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- @mock.patch("pcs.cli.common.parse_args.prepare_options")
-diff --git a/pcs/cli/constraint_all/test/test_console_report.py b/pcs/cli/constraint_all/test/test_console_report.py
-index 1cf5721..61be2cc 100644
---- a/pcs/cli/constraint_all/test/test_console_report.py
-+++ b/pcs/cli/constraint_all/test/test_console_report.py
-@@ -5,8 +5,8 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import TestCase
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.cli.constraint_all import console_report
- 
- class ConstraintTest(TestCase):
-diff --git a/pcs/cli/constraint_ticket/test/test_command.py b/pcs/cli/constraint_ticket/test/test_command.py
-index 045d336..d40d421 100644
---- a/pcs/cli/constraint_ticket/test/test_command.py
-+++ b/pcs/cli/constraint_ticket/test/test_command.py
-@@ -5,9 +5,9 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.cli.common.errors import CmdLineInputError
- from pcs.cli.constraint_ticket import command
- 
-diff --git a/pcs/cli/constraint_ticket/test/test_console_report.py b/pcs/cli/constraint_ticket/test/test_console_report.py
-index b352287..11af2e2 100644
---- a/pcs/cli/constraint_ticket/test/test_console_report.py
-+++ b/pcs/cli/constraint_ticket/test/test_console_report.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from pcs.cli.constraint_ticket import console_report
- 
- class ConstraintPlainTest(TestCase):
-diff --git a/pcs/cli/constraint_ticket/test/test_parse_args.py b/pcs/cli/constraint_ticket/test/test_parse_args.py
-index 9d23167..4a592c2 100644
---- a/pcs/cli/constraint_ticket/test/test_parse_args.py
-+++ b/pcs/cli/constraint_ticket/test/test_parse_args.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from pcs.cli.constraint_ticket import parse_args
- from pcs.cli.common.errors import CmdLineInputError
- 
-diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py
-index eb1885c..9717a96 100644
---- a/pcs/lib/booth/test/test_config_exchange.py
-+++ b/pcs/lib/booth/test/test_config_exchange.py
-@@ -4,7 +4,7 @@ from __future__ import (
-     print_function,
-     unicode_literals,
- )
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from pcs.lib.booth import config_structure, config_exchange
- 
- 
-diff --git a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py
-index 8266cac..d0df256 100644
---- a/pcs/lib/booth/test/test_config_files.py
-+++ b/pcs/lib/booth/test/test_config_files.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import os.path
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.common import report_codes, env_file_role_codes as file_roles
- from pcs.lib.booth import config_files
-@@ -14,12 +14,10 @@ from pcs.lib.errors import ReportItemSeverity as severities
- from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
- from pcs.test.tools.assertions import assert_raise_library_error, assert_report_item_list_equal
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.misc import create_patcher
-+from pcs.test.tools.pcs_unittest import mock
- 
--def patch_config_files(target, *args, **kwargs):
--    return mock.patch(
--        "pcs.lib.booth.config_files.{0}".format(target), *args, **kwargs
--    )
-+patch_config_files = create_patcher("pcs.lib.booth.config_files")
- 
- @mock.patch("os.path.isdir")
- @mock.patch("os.listdir")
-diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
-index 1dd07cb..5e7ac68 100644
---- a/pcs/lib/booth/test/test_config_structure.py
-+++ b/pcs/lib/booth/test/test_config_structure.py
-@@ -5,13 +5,13 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.common import report_codes
- from pcs.lib.booth import config_structure
- from pcs.lib.errors import ReportItemSeverity as severities
- from pcs.test.tools.assertions import assert_raise_library_error
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- class ValidateTicketExistsTest(TestCase):
-diff --git a/pcs/lib/booth/test/test_env.py b/pcs/lib/booth/test/test_env.py
-index 77e0944..993d709 100644
---- a/pcs/lib/booth/test/test_env.py
-+++ b/pcs/lib/booth/test/test_env.py
-@@ -8,20 +8,17 @@ from __future__ import (
- import grp
- import os
- import pwd
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs import settings
- from pcs.common import report_codes
- from pcs.lib.booth import env
- from pcs.lib.errors import ReportItemSeverity as severities
- from pcs.test.tools.assertions import assert_raise_library_error
--from pcs.test.tools.misc import get_test_resource as rc
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.misc import get_test_resource as rc, create_patcher
-+from pcs.test.tools.pcs_unittest import mock
- 
--def patch_env(target, *args, **kwargs):
--    return mock.patch(
--        "pcs.lib.booth.env.{0}".format(target), *args, **kwargs
--    )
-+patch_env = create_patcher("pcs.lib.booth.env")
- 
- class GetConfigFileNameTest(TestCase):
-     @patch_env("os.path.exists")
-diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py
-index dd72c1e..8971438 100644
---- a/pcs/lib/booth/test/test_resource.py
-+++ b/pcs/lib/booth/test/test_resource.py
-@@ -5,12 +5,12 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from lxml import etree
- 
- import pcs.lib.booth.resource as booth_resource
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.misc import get_test_resource as rc
- 
- 
-diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py
-index 0ea837a..d47ffca 100644
---- a/pcs/lib/booth/test/test_status.py
-+++ b/pcs/lib/booth/test/test_status.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- try:
-     # python 2
-@@ -15,7 +15,7 @@ except ImportError:
-     # python 3
-     from urllib.parse import parse_qs as url_decode
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import assert_raise_library_error
- 
- from pcs import settings
-diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py
-index 9ba6e80..701b086 100644
---- a/pcs/lib/booth/test/test_sync.py
-+++ b/pcs/lib/booth/test/test_sync.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- import json
- import base64
-@@ -16,7 +16,7 @@ except ImportError:
-     # python 3
-     from urllib.parse import parse_qs as url_decode
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import (
-     assert_report_item_list_equal,
-     assert_raise_library_error,
-diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
-index 50eaef6..c47dd1e 100644
---- a/pcs/lib/cib/test/test_alert.py
-+++ b/pcs/lib/cib/test/test_alert.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from lxml import etree
- 
-@@ -17,7 +17,7 @@ from pcs.test.tools.assertions import(
-     assert_xml_equal,
-     assert_report_item_list_equal,
- )
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
- 
- 
-diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py
-index 961f8b0..a4ee636 100644
---- a/pcs/lib/cib/test/test_constraint.py
-+++ b/pcs/lib/cib/test/test_constraint.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- from functools import partial
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from lxml import etree
- 
-@@ -18,7 +18,7 @@ from pcs.test.tools.assertions import(
-     assert_xml_equal,
- )
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import (
-     assert_report_item_list_equal,
- )
-diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py
-index 377b981..6a85d8a 100644
---- a/pcs/lib/cib/test/test_constraint_colocation.py
-+++ b/pcs/lib/cib/test/test_constraint_colocation.py
-@@ -5,13 +5,13 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.common import report_codes
- from pcs.lib.cib.constraint import colocation
- from pcs.lib.errors import ReportItemSeverity as severities
- from pcs.test.tools.assertions import assert_raise_library_error
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- #Patch check_new_id_applicable is always desired when working with
-diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py
-index 02d1c5f..3cb33d1 100644
---- a/pcs/lib/cib/test/test_constraint_order.py
-+++ b/pcs/lib/cib/test/test_constraint_order.py
-@@ -5,13 +5,13 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.common import report_codes
- from pcs.lib.cib.constraint import order
- from pcs.lib.errors import ReportItemSeverity as severities
- from pcs.test.tools.assertions import assert_raise_library_error
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- #Patch check_new_id_applicable is always desired when working with
-diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
-index 87fd1e5..ede748e 100644
---- a/pcs/lib/cib/test/test_constraint_ticket.py
-+++ b/pcs/lib/cib/test/test_constraint_ticket.py
-@@ -6,13 +6,13 @@ from __future__ import (
- )
- 
- from functools import partial
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.common import report_codes
- from pcs.lib.cib.constraint import ticket
- from pcs.lib.errors import ReportItemSeverity as severities
- from pcs.test.tools.assertions import assert_raise_library_error
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- @mock.patch("pcs.lib.cib.constraint.ticket.tools.check_new_id_applicable")
-diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
-index 6907f25..56ba4d1 100644
---- a/pcs/lib/cib/test/test_nvpair.py
-+++ b/pcs/lib/cib/test/test_nvpair.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from lxml import etree
- 
-diff --git a/pcs/lib/cib/test/test_resource.py b/pcs/lib/cib/test/test_resource.py
-index ef33ef6..c1e21a0 100644
---- a/pcs/lib/cib/test/test_resource.py
-+++ b/pcs/lib/cib/test/test_resource.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from lxml import etree
- from pcs.lib.cib.resource import find_by_id
- 
-diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py
-index 7b77ac4..e4fd8e4 100644
---- a/pcs/lib/cib/test/test_resource_set.py
-+++ b/pcs/lib/cib/test/test_resource_set.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from lxml import etree
- 
-@@ -16,7 +16,7 @@ from pcs.test.tools.assertions import(
-     assert_raise_library_error,
-     assert_xml_equal
- )
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- class PrepareSetTest(TestCase):
-diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
-index bced45e..bc68baf 100644
---- a/pcs/lib/commands/test/test_alert.py
-+++ b/pcs/lib/commands/test/test_alert.py
-@@ -8,9 +8,9 @@ from __future__ import (
- import logging
- from lxml import etree
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-     assert_xml_equal,
-diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
-index d2429b6..08d2c79 100644
---- a/pcs/lib/commands/test/test_booth.py
-+++ b/pcs/lib/commands/test/test_booth.py
-@@ -8,14 +8,15 @@ from __future__ import (
- import os
- import base64
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-     assert_report_item_list_equal,
- )
-+from pcs.test.tools.misc import create_patcher
- 
- from pcs import settings
- from pcs.common import report_codes
-@@ -32,10 +33,7 @@ from pcs.lib.external import (
-     StopServiceError
- )
- 
--def patch_commands(target, *args, **kwargs):
--    return mock.patch(
--        "pcs.lib.commands.booth.{0}".format(target), *args, **kwargs
--    )
-+patch_commands = create_patcher("pcs.lib.commands.booth")
- 
- @mock.patch("pcs.lib.booth.config_files.generate_key", return_value="key value")
- @mock.patch("pcs.lib.commands.booth.build", return_value="config content")
-diff --git a/pcs/lib/commands/test/test_constraint_common.py b/pcs/lib/commands/test/test_constraint_common.py
-index e0872ff..cb5e177 100644
---- a/pcs/lib/commands/test/test_constraint_common.py
-+++ b/pcs/lib/commands/test/test_constraint_common.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from lxml import etree
- 
-@@ -17,7 +17,7 @@ from pcs.test.tools.assertions import(
-     assert_xml_equal,
- )
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- def fixture_cib_and_constraints():
-diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
-index d8b8a5f..586ca4b 100644
---- a/pcs/lib/commands/test/test_ticket.py
-+++ b/pcs/lib/commands/test/test_ticket.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.common import report_codes
- from pcs.lib.commands.constraint import ticket as ticket_command
-diff --git a/pcs/lib/test/misc.py b/pcs/lib/test/misc.py
-index 1b1670a..be99bb2 100644
---- a/pcs/lib/test/misc.py
-+++ b/pcs/lib/test/misc.py
-@@ -9,7 +9,7 @@ import logging
- 
- from pcs.lib.env import LibraryEnvironment as Env
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- def get_mocked_env(**kwargs):
-diff --git a/pcs/lib/test/test_env_file.py b/pcs/lib/test/test_env_file.py
-index 3e27af1..754b40e 100644
---- a/pcs/lib/test/test_env_file.py
-+++ b/pcs/lib/test/test_env_file.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.common import report_codes
- from pcs.lib.env_file import RealFile, GhostFile
-@@ -15,7 +15,7 @@ from pcs.test.tools.assertions import(
-     assert_report_item_list_equal
- )
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- class GhostFileReadTest(TestCase):
-diff --git a/pcs/lib/test/test_errors.py b/pcs/lib/test/test_errors.py
-index 2e99e19..871aa76 100644
---- a/pcs/lib/test/test_errors.py
-+++ b/pcs/lib/test/test_errors.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.lib.errors import LibraryEnvError
- 
-diff --git a/pcs/lib/test/test_pacemaker_values.py b/pcs/lib/test/test_pacemaker_values.py
-index 7979990..62b8e91 100644
---- a/pcs/lib/test/test_pacemaker_values.py
-+++ b/pcs/lib/test/test_pacemaker_values.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.test.tools.assertions import assert_raise_library_error
- 
-diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
-index b053614..186c035 100644
---- a/pcs/test/test_acl.py
-+++ b/pcs/test/test_acl.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import shutil
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
-index 8a245a2..36f3687 100644
---- a/pcs/test/test_cluster.py
-+++ b/pcs/test/test_cluster.py
-@@ -7,7 +7,7 @@ from __future__ import (
- 
- import os
- import shutil
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-diff --git a/pcs/test/test_common_tools.py b/pcs/test/test_common_tools.py
-index 5c8482e..5290e6d 100644
---- a/pcs/test/test_common_tools.py
-+++ b/pcs/test/test_common_tools.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- import time
- 
- from pcs.common import tools
-diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
-index 364b40d..7c76e09 100644
---- a/pcs/test/test_constraints.py
-+++ b/pcs/test/test_constraints.py
-@@ -7,7 +7,7 @@ from __future__ import (
- 
- import os
- import shutil
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.assertions import AssertPcsMixin, console_report
- from pcs.test.tools.misc import (
-diff --git a/pcs/test/test_lib_cib_acl.py b/pcs/test/test_lib_cib_acl.py
-index 7e1750e..efaad7e 100644
---- a/pcs/test/test_lib_cib_acl.py
-+++ b/pcs/test/test_lib_cib_acl.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
-index 0fd4d22..ffc2642 100644
---- a/pcs/test/test_lib_cib_tools.py
-+++ b/pcs/test/test_lib_cib_tools.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from os.path import join
- from lxml import etree
-@@ -15,7 +15,7 @@ from pcs.test.tools.assertions import (
-     assert_xml_equal,
- )
- from pcs.test.tools.misc import get_test_resource as rc
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
- 
- from pcs import settings
-diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py
-index ff588d5..10841e9 100644
---- a/pcs/test/test_lib_commands_qdevice.py
-+++ b/pcs/test/test_lib_commands_qdevice.py
-@@ -5,11 +5,11 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- import base64
- import logging
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-     assert_report_item_list_equal,
-diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
-index d286a8f..d7701af 100644
---- a/pcs/test/test_lib_commands_quorum.py
-+++ b/pcs/test/test_lib_commands_quorum.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import logging
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-@@ -17,7 +17,7 @@ from pcs.test.tools.misc import (
-     ac,
-     get_test_resource as rc,
- )
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- from pcs.common import report_codes
- from pcs.lib.env import LibraryEnvironment
-diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
-index 0663082..f8146ce 100644
---- a/pcs/test/test_lib_commands_sbd.py
-+++ b/pcs/test/test_lib_commands_sbd.py
-@@ -7,9 +7,9 @@ from __future__ import (
- 
- import logging
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-     assert_report_item_list_equal,
-diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
-index 91f7b40..4373d65 100644
---- a/pcs/test/test_lib_corosync_config_facade.py
-+++ b/pcs/test/test_lib_corosync_config_facade.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- import re
- 
- from pcs.test.tools.assertions import (
-diff --git a/pcs/test/test_lib_corosync_config_parser.py b/pcs/test/test_lib_corosync_config_parser.py
-index da20889..a68710b 100644
---- a/pcs/test/test_lib_corosync_config_parser.py
-+++ b/pcs/test/test_lib_corosync_config_parser.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.misc import ac
- 
-diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
-index 0fc5eb2..3173195 100644
---- a/pcs/test/test_lib_corosync_live.py
-+++ b/pcs/test/test_lib_corosync_live.py
-@@ -5,13 +5,13 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- import os.path
- 
- from pcs.test.tools.assertions import assert_raise_library_error
- from pcs.test.tools.misc import get_test_resource as rc
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- from pcs import settings
- from pcs.common import report_codes
-diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py
-index e0332f1..0b5bd67 100644
---- a/pcs/test/test_lib_corosync_qdevice_client.py
-+++ b/pcs/test/test_lib_corosync_qdevice_client.py
-@@ -5,9 +5,9 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import assert_raise_library_error
- 
- from pcs.common import report_codes
-diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
-index 3d473f7..340a8dc 100644
---- a/pcs/test/test_lib_corosync_qdevice_net.py
-+++ b/pcs/test/test_lib_corosync_qdevice_net.py
-@@ -5,12 +5,12 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- import base64
- import os.path
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import assert_raise_library_error
- from pcs.test.tools.misc import get_test_resource
- 
-diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py
-index c6322b7..205fd60 100644
---- a/pcs/test/test_lib_env.py
-+++ b/pcs/test/test_lib_env.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- import logging
- from lxml import etree
- 
-@@ -16,7 +16,7 @@ from pcs.test.tools.assertions import (
- )
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
- from pcs.test.tools.misc import get_test_resource as rc
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- from pcs.lib.env import LibraryEnvironment
- from pcs.common import report_codes
-diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
-index b0ffdbb..aafbe85 100644
---- a/pcs/test/test_lib_external.py
-+++ b/pcs/test/test_lib_external.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- import os.path
- import logging
- try:
-@@ -27,7 +27,7 @@ from pcs.test.tools.assertions import (
-     assert_report_item_list_equal,
- )
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- from pcs import settings
- from pcs.common import report_codes
-diff --git a/pcs/test/test_lib_node.py b/pcs/test/test_lib_node.py
-index 6c841d3..caf128f 100644
---- a/pcs/test/test_lib_node.py
-+++ b/pcs/test/test_lib_node.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- import pcs.lib.node as lib
- 
-diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py
-index cff88eb..6f05b15 100644
---- a/pcs/test/test_lib_nodes_task.py
-+++ b/pcs/test/test_lib_nodes_task.py
-@@ -5,14 +5,14 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-     assert_report_item_list_equal,
- )
- from pcs.test.tools.custom_mock import MockLibraryReportProcessor
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- 
- from pcs.common import report_codes
- from pcs.lib.external import NodeCommunicator, NodeAuthenticationException
-diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py
-index 0edee5c..c475db6 100644
---- a/pcs/test/test_lib_pacemaker.py
-+++ b/pcs/test/test_lib_pacemaker.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- import os.path
- 
- from pcs.test.tools.assertions import (
-@@ -13,7 +13,7 @@ from pcs.test.tools.assertions import (
-     assert_xml_equal,
- )
- from pcs.test.tools.misc import get_test_resource as rc
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.xml import XmlManipulation
- 
- from pcs import settings
-diff --git a/pcs/test/test_lib_pacemaker_state.py b/pcs/test/test_lib_pacemaker_state.py
-index 54f536d..13f6eb0 100644
---- a/pcs/test/test_lib_pacemaker_state.py
-+++ b/pcs/test/test_lib_pacemaker_state.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- from lxml import etree
- 
- from pcs.test.tools.assertions import assert_raise_library_error
-diff --git a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py
-index 5704184..08f9061 100644
---- a/pcs/test/test_lib_resource_agent.py
-+++ b/pcs/test/test_lib_resource_agent.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- import os.path
- 
- from lxml import etree
-@@ -14,7 +14,7 @@ from pcs.test.tools.assertions import (
-     ExtendedAssertionsMixin,
-     assert_xml_equal,
- )
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.xml import XmlManipulation as XmlMan
- 
- 
-diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
-index 516e0bd..720d8b1 100644
---- a/pcs/test/test_lib_sbd.py
-+++ b/pcs/test/test_lib_sbd.py
-@@ -6,9 +6,9 @@ from __future__ import (
- )
- 
- import json
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
--from pcs.test.tools.pcs_mock import mock
-+from pcs.test.tools.pcs_unittest import mock
- from pcs.test.tools.assertions import (
-     assert_raise_library_error,
-     assert_report_item_list_equal,
-diff --git a/pcs/test/test_lib_tools.py b/pcs/test/test_lib_tools.py
-index 5141ca9..606cb05 100644
---- a/pcs/test/test_lib_tools.py
-+++ b/pcs/test/test_lib_tools.py
-@@ -5,7 +5,7 @@ from __future__ import (
-     unicode_literals,
- )
- 
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.lib import tools
- 
-diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
-index 785c711..9b45e07 100644
---- a/pcs/test/test_node.py
-+++ b/pcs/test/test_node.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import shutil
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py
-index fbaf880..9634cca 100644
---- a/pcs/test/test_properties.py
-+++ b/pcs/test/test_properties.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import shutil
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
-index 86de4c6..4f15d7f 100644
---- a/pcs/test/test_quorum.py
-+++ b/pcs/test/test_quorum.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import shutil
--from unittest import TestCase
-+from pcs.test.tools.pcs_unittest import TestCase
- 
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
-index 614b895..87a7fa8 100644
---- a/pcs/test/test_resource.py
-+++ b/pcs/test/test_resource.py
-@@ -8,7 +8,7 @@ from __future__ import (
- import os
- import shutil
- import re
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.assertions import AssertPcsMixin
- from pcs.test.tools.misc import (
-diff --git a/pcs/test/test_rule.py b/pcs/test/test_rule.py
-index 8cf717a..ad3448d 100644
---- a/pcs/test/test_rule.py
-+++ b/pcs/test/test_rule.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import shutil
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- import xml.dom.minidom
- 
- from pcs import rule
-diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
-index a6ee2f5..82b2c84 100644
---- a/pcs/test/test_stonith.py
-+++ b/pcs/test/test_stonith.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import shutil
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- 
- from pcs.test.tools.misc import (
-     ac,
-diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
-index 43145fd..252de30 100644
---- a/pcs/test/test_utils.py
-+++ b/pcs/test/test_utils.py
-@@ -6,7 +6,7 @@ from __future__ import (
- )
- 
- import sys
--import unittest
-+from pcs.test.tools import pcs_unittest as unittest
- import xml.dom.minidom
- import xml.etree.cElementTree as ET
- from time import sleep
-diff --git a/pcs/test/tools/misc.py b/pcs/test/tools/misc.py
-index a78ccdc..745b228 100644
---- a/pcs/test/tools/misc.py
-+++ b/pcs/test/tools/misc.py
-@@ -10,6 +10,7 @@ import os.path
- import re
- 
- from pcs import utils
-+from pcs.test.tools.pcs_unittest import mock
- 
- 
- testdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-@@ -50,3 +51,16 @@ def is_minimum_pacemaker_version(cmajor, cminor, crev):
-         or
-         (major == cmajor and minor == cminor and rev >= crev)
-     )
-+
-+def create_patcher(target_prefix):
-+    """
-+    Return function for patching tests with preconfigured target prefix
-+    string target_prefix is prefix for patched names. Typicaly tested module
-+    like for example "pcs.lib.commands.booth". Between target_prefix and target
-+    is "." (dot)
-+    """
-+    def patch(target, *args, **kwargs):
-+        return mock.patch(
-+            "{0}.{1}".format(target_prefix, target), *args, **kwargs
-+        )
-+    return patch
-diff --git a/pcs/test/tools/pcs_mock.py b/pcs/test/tools/pcs_mock.py
-deleted file mode 100644
-index d84ac67..0000000
---- a/pcs/test/tools/pcs_mock.py
-+++ /dev/null
-@@ -1,13 +0,0 @@
--try:
--    import unittest.mock as mock
--except ImportError:
--    import mock
--
--if not hasattr(mock.Mock, "assert_not_called"):
--    def __assert_not_called(self, *args, **kwargs):
--        if self.call_count != 0:
--            msg = ("Expected '%s' to not have been called. Called %s times." %
--                   (self._mock_name or 'mock', self.call_count))
--            raise AssertionError(msg)
--    mock.Mock.assert_not_called = __assert_not_called
--
-diff --git a/pcs/test/tools/pcs_unittest.py b/pcs/test/tools/pcs_unittest.py
-index 4a3205d..af549ae 100644
---- a/pcs/test/tools/pcs_unittest.py
-+++ b/pcs/test/tools/pcs_unittest.py
-@@ -1,7 +1,26 @@
- import sys
-+#In package unittest there is no module mock before python 3.3. In python 3
-+#module mock is not imported by * because module mock is not imported in
-+#unittest/__init__.py
- major, minor = sys.version_info[:2]
- if major == 2 and minor == 6:
-+    #we use features that are missing before 2.7 (like test skipping,
-+    #assertRaises as context manager...) so we need unittest2
-     from unittest2 import *
-+    import mock
- else:
-     from unittest import *
-+    try:
-+        import unittest.mock as mock
-+    except ImportError:
-+        import mock
- del major, minor, sys
-+
-+#backport of assert_not_called (new in version 3.5)
-+if not hasattr(mock.Mock, "assert_not_called"):
-+    def __assert_not_called(self, *args, **kwargs):
-+        if self.call_count != 0:
-+            msg = ("Expected '%s' to not have been called. Called %s times." %
-+                   (self._mock_name or 'mock', self.call_count))
-+            raise AssertionError(msg)
-+    mock.Mock.assert_not_called = __assert_not_called
--- 
-1.8.3.1
-
diff --git a/SOURCES/test-fix-an-occasional-multithread-test-fail.patch b/SOURCES/test-fix-an-occasional-multithread-test-fail.patch
deleted file mode 100644
index 8a3f660..0000000
--- a/SOURCES/test-fix-an-occasional-multithread-test-fail.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From c4e916c7ae9f5bb040d8268f93d5949e1cd078f8 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek <tojeline@redhat.com>
-Date: Tue, 26 Jul 2016 10:17:34 +0200
-Subject: [PATCH] test: fix an occasional multithread test fail
-
----
- pcs/test/test_utils.py | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
-index 192048e..43145fd 100644
---- a/pcs/test/test_utils.py
-+++ b/pcs/test/test_utils.py
-@@ -1806,7 +1806,10 @@ class RunParallelTest(unittest.TestCase):
-             wait_seconds=.1
-         )
- 
--        self.assertEqual(log, ['first', 'second'])
-+        self.assertEqual(
-+            sorted(log),
-+            sorted(['first', 'second'])
-+        )
- 
-     def test_wait_for_slower_workers(self):
-         log = []
--- 
-1.8.3.1
-
diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec
index b4320b9..62ff8fd 100644
--- a/SPECS/pcs.spec
+++ b/SPECS/pcs.spec
@@ -1,137 +1,132 @@
-Name: pcs		
-Version: 0.9.152
-Release: 10%{?dist}.3
+Name: pcs
+Version: 0.9.158
+Release: 6%{?dist}
 License: GPLv2
 URL: https://github.com/ClusterLabs/pcs
 Group: System Environment/Base
-Summary: Pacemaker Configuration System	
+Summary: Pacemaker Configuration System
 #building only for architectures with pacemaker and corosync available
-ExclusiveArch: i686 x86_64 s390x
+ExclusiveArch: i686 x86_64 s390x ppc64le
+
 #part after last slash is recognized as filename in look-aside repository
 #desired name is achieved by trick with hash anchor
 Source0: %{url}/archive/%{version}.tar.gz#/%{name}-%{version}.tar.gz
 Source1: HAM-logo.png
 Source2: pcsd-bundle-config-1
-Source3: https://rubygems.org/downloads/backports-3.6.8.gem
-Source4: https://rubygems.org/downloads/eventmachine-1.2.0.1.gem
-Source5: https://rubygems.org/downloads/multi_json-1.12.0.gem
-Source6: https://rubygems.org/downloads/open4-1.3.4.gem
-Source7: https://rubygems.org/downloads/orderedhash-0.0.6.gem
-Source8: https://rubygems.org/downloads/rack-protection-1.5.3.gem
-Source9: https://rubygems.org/downloads/rack-test-0.6.3.gem
-Source10: https://rubygems.org/downloads/rack-1.6.4.gem
-Source11: https://rubygems.org/downloads/rpam-ruby19-1.2.1.gem
-Source12: https://rubygems.org/downloads/sinatra-contrib-1.4.7.gem
-Source13: https://rubygems.org/downloads/sinatra-1.4.7.gem
-Source14: https://rubygems.org/downloads/tilt-2.0.3.gem
-Source15: https://github.com/testing-cabal/mock/archive/1.0.1.tar.gz#/mock-1.0.1.tar.gz
-Source99: favicon.ico
-
-Patch0: bz1315371-01-add-support-for-pacemaker-alerts.patch
-Patch1: bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch
-Patch2: bz1164402-01-sbd-fix-call_node-calls-on-python3.patch
-Patch3: bz1346852-01-fix-bad-request-when-resource-removal-t.patch
-Patch4: bz1327739-01-add-pcs-quorum-expected-votes-command.patch
-Patch5: bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch
-Patch6: bz1349465-01-allow-to-specify-bash-completion-install-dir.patch
-Patch7: fix-qdevice-tests-failing-due-to-multithreading.patch
-Patch8: bz1281364-01-gui-add-constraint-colocation-set-support.patch
-Patch9: bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch
-Patch10: bz1353607-01-tests-use-safe-node-names.patch
-Patch11: bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch
-Patch12: bz1158805-01-cli-improve-quorum-device-commands-syntax.patch
-Patch13: bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch
-Patch14: pcsd-fix-syntax-error-on-ruby-1.8.patch
-Patch15: bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch
-Patch16: bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch
-Patch17: bz1357945-01-add-support-for-clufter-s-dist-parameter.patch
-Patch18: bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch
-Patch19: bz1301993-01-improve-node-properties-commands.patch
-Patch20: bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch
-Patch21: bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch
-Patch22: bz1315371-03-improve-alerts-help.patch
-Patch23: bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch
-Patch24: bz1303136-01-fix-check-if-id-exists-in-cib.patch
-Patch25: bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch
-Patch26: test-fix-an-occasional-multithread-test-fail.patch
-Patch27: bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch
-Patch28: bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch
-Patch29: bz1349465-02-install-bash-completion-with-standard-permissions.patch
-Patch30: bz1357945-02-doc-fixes-regarding-clufter.patch
-Patch31: bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch
-Patch32: bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch
-Patch33: bz1308514-01-add-booth-support.patch
-Patch34: bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch
-Patch35: bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch
-Patch36: bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch
-Patch37: bz1164402-02-sbd-fixes.patch
-Patch38: bz1315371-04-alerts-related-fixes.patch
-Patch39: bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch
-Patch40: bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch
-Patch41: bz1308514-02-booth-support-improvements.patch
-Patch42: bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch
-Patch43: bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch
-Patch44: test-corrections.patch
-Patch45: bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch
-Patch46: bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch
-#forgotten patch (chronologically should be before Patch0
-Patch47: fix-pcs-constraint-ticket-set-help.patch
-Patch48: bz1305049-01-pcs-does-not-support-ticket-constraints.patch
-Patch49: bz1158500-01-add-support-for-utilization-attributes.patch
-Patch50: bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch
-Patch51: bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch
-Patch52: bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch
-Patch53: bz1305049-02-pcs-does-not-support-ticket-constraints.patch
-Patch54: rhel7.patch
-Patch55: change-cman-to-rhel6-in-messages.patch
-Patch56: show-only-warning-when-crm_mon-xml-is-invalid.patch
-Patch57: bz1408476-01-accept-RA-with-instantiated-systemd-service-in-name.patch
-Patch58: bz1404233-01-cluster-cib-push-allows-to-obtain-and-push-a-diff.patch
-Patch59: bz1420757-01-fix-pcs-cluster-cib-push-scope.patch
-Patch60: bz1420757-02-fix-cib-push-diff-against-when-the-diff-is-empty.patch
-
-BuildRequires: python2-devel python-setuptools
-BuildRequires: gcc gcc-c++
-BuildRequires: ruby >= 2.0.0 ruby-devel rubygems pam-devel git
+
+Source11: https://rubygems.org/downloads/backports-3.6.8.gem
+Source12: https://rubygems.org/downloads/multi_json-1.12.1.gem
+Source13: https://rubygems.org/downloads/open4-1.3.4.gem
+Source14: https://rubygems.org/downloads/orderedhash-0.0.6.gem
+Source15: https://rubygems.org/downloads/rack-protection-1.5.3.gem
+Source16: https://rubygems.org/downloads/rack-test-0.6.3.gem
+Source17: https://rubygems.org/downloads/rack-1.6.4.gem
+Source18: https://rubygems.org/downloads/rpam-ruby19-1.2.1.gem
+Source19: https://rubygems.org/downloads/sinatra-contrib-1.4.7.gem
+Source20: https://rubygems.org/downloads/sinatra-1.4.8.gem
+Source21: https://rubygems.org/downloads/tilt-2.0.6.gem
+Source22: https://rubygems.org/downloads/ethon-0.10.1.gem
+Source23: https://rubygems.org/downloads/ffi-1.9.17.gem
+
+Source31: https://github.com/testing-cabal/mock/archive/1.0.1.tar.gz#/mock-1.0.1.tar.gz
+
+Patch0: bz1176018-01-remote-guest-nodes-crashes-fixed.patch
+Patch1: bz1373614-01-return-1-when-pcsd-is-unable-to-bind.patch
+Patch2: bz1386114-01-fix-a-crash-in-adding-a-remote-node.patch
+Patch3: bz1284404-01-web-UI-fix-creating-a-new-cluster.patch
+Patch4: bz1165821-01-pcs-CLI-GUI-should-be-capable-of.patch
+Patch5: bz1176018-02-pcs-pcsd-should-be-able-to-config.patch
+Patch6: bz1386114-02-deal-with-f-corosync_conf-if-create-remote-res.patch
+Patch7: bz1176018-03-don-t-call-remove-guest-node-when-f-is-used.patch
+Patch8: bz1165821-02-pcs-CLI-GUI-should-be-capable-of.patch
+Patch9: bz1447910-01-bundle-resources-are-missing-meta-attributes.patch
+Patch10: bz1433016-02-make-container-type-mandatory-in-bundle-create.patch
+Patch11: bz1284404-02-web-ui-fix-timeout-when-cluster-setup-takes-long.patch
+Patch12: bz1458153-01-give-back-orig.-master-behav.-resource-create.patch
+Patch13: bz1459503-01-OSP-workarounds-not-compatible-wi.patch
+
+Patch100: rhel7.patch
+Patch101: change-cman-to-rhel6-in-messages.patch
+Patch102: show-only-warning-when-crm_mon-xml-is-invalid.patch
+
+# git for patches
+BuildRequires: git
+# python for pcs
+BuildRequires: python
+BuildRequires: python-devel
+BuildRequires: python-setuptools
+BuildRequires: python-pycurl
+# gcc for compiling custom rubygems
+BuildRequires: gcc
+BuildRequires: gcc-c++
+# ruby and gems for pcsd
+BuildRequires: ruby >= 2.0.0
+BuildRequires: rubygems
+BuildRequires: ruby-devel
+# pam devel for compiling rubygem-rpam-ruby19
+BuildRequires: pam-devel
+BuildRequires: rubygem-bundler
 BuildRequires: rubygem-json
-BuildRequires: systemd-units rubygem-bundler
 BuildRequires: rubygem-minitest
+#for building rubygem-ffi
+BuildRequires: libffi-devel
+
 # following for UpdateTimestamps sanitization function
 BuildRequires: diffstat
-#following BuildRequires are needed by tests
+BuildRequires: systemd-units
+#for tests
 BuildRequires: python-lxml
 BuildRequires: corosync
 BuildRequires: pacemaker
 BuildRequires: pacemaker-cli
 BuildRequires: fence-agents-all
+# pcsd fonts and font management tools
+BuildRequires: fontconfig
+BuildRequires: liberation-sans-fonts
+BuildRequires: overpass-fonts
 
-Requires(post): systemd
-Requires(preun): systemd
-Requires(postun): systemd
-Requires: corosync pacemaker pacemaker-cli
-Requires: psmisc openssl
+# python and libraries for pcs, setuptools for pcs entrypoint
+Requires: python
 Requires: python-lxml
 Requires: python-setuptools
 Requires: python-clufter >= 0.59.0
+Requires: python-pycurl
+# ruby and gems for pcsd
 Requires: ruby >= 2.0.0
 Requires: rubygem-json
+# for killall
+Requires: psmisc
+# for working with certificates (validation etc.)
+Requires: openssl
+# cluster stack and related packages
+Requires: corosync
+Requires: pacemaker
+Requires: pacemaker-cli
+# for post, preun and postun macros
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+# pcsd fonts
+Requires: liberation-sans-fonts
+Requires: overpass-fonts
 
 Provides: bundled(rubygem-backports) = 3.6.8
-Provides: bundled(rubygem-eventmachine) = 1.2.0.1
-Provides: bundled(rubygem-multi_json) = 1.12.0
+Provides: bundled(rubygem-multi_json) = 1.12.1
 Provides: bundled(rubygem-open4) = 1.3.4
 Provides: bundled(rubygem-orderedhash) = 0.0.6
 Provides: bundled(rubygem-rack) = 1.6.4
 Provides: bundled(rubygem-rack-protection) = 1.5.3
 Provides: bundled(rubygem-rack-test) = 0.6.3
 Provides: bundled(rubygem-rpam-ruby19) = 1.2.1
-Provides: bundled(rubygem-sinatra) = 1.4.7
+Provides: bundled(rubygem-sinatra) = 1.4.8
 Provides: bundled(rubygem-sinatra-contrib) = 1.4.7
-Provides: bundled(rubygem-tilt) = 2.0.3
+Provides: bundled(rubygem-tilt) = 2.0.6
+Provides: bundled(rubygem-ethon) = 0.10.1
+Provides: bundled(rubygem-ffi) = 1.9.17
 
 %description
 pcs is a corosync and pacemaker configuration tool.  It permits users to
-easily view, modify and created pacemaker based clusters.
+easily view, modify and create pacemaker based clusters.
 
 %define PCS_PREFIX /usr
 %prep
@@ -164,74 +159,30 @@ UpdateTimestamps -p1 %{PATCH10}
 UpdateTimestamps -p1 %{PATCH11}
 UpdateTimestamps -p1 %{PATCH12}
 UpdateTimestamps -p1 %{PATCH13}
-UpdateTimestamps -p1 %{PATCH14}
-UpdateTimestamps -p1 %{PATCH15}
-UpdateTimestamps -p1 %{PATCH16}
-UpdateTimestamps -p1 %{PATCH17}
-UpdateTimestamps -p1 %{PATCH18}
-UpdateTimestamps -p1 %{PATCH19}
-UpdateTimestamps -p1 %{PATCH20}
-UpdateTimestamps -p1 %{PATCH21}
-UpdateTimestamps -p1 %{PATCH22}
-UpdateTimestamps -p1 %{PATCH23}
-UpdateTimestamps -p1 %{PATCH24}
-UpdateTimestamps -p1 %{PATCH25}
-UpdateTimestamps -p1 %{PATCH26}
-UpdateTimestamps -p1 %{PATCH27}
-UpdateTimestamps -p1 %{PATCH28}
-UpdateTimestamps -p1 %{PATCH29}
-UpdateTimestamps -p1 %{PATCH30}
-UpdateTimestamps -p1 %{PATCH31}
-UpdateTimestamps -p1 %{PATCH32}
-UpdateTimestamps -p1 %{PATCH33}
-UpdateTimestamps -p1 %{PATCH34}
-UpdateTimestamps -p1 %{PATCH35}
-UpdateTimestamps -p1 %{PATCH36}
-UpdateTimestamps -p1 %{PATCH37}
-UpdateTimestamps -p1 %{PATCH38}
-UpdateTimestamps -p1 %{PATCH39}
-UpdateTimestamps -p1 %{PATCH40}
-UpdateTimestamps -p1 %{PATCH41}
-UpdateTimestamps -p1 %{PATCH42}
-UpdateTimestamps -p1 %{PATCH43}
-UpdateTimestamps -p1 %{PATCH44}
-UpdateTimestamps -p1 %{PATCH45}
-UpdateTimestamps -p1 %{PATCH46}
-UpdateTimestamps -p1 %{PATCH47}
-UpdateTimestamps -p1 %{PATCH48}
-UpdateTimestamps -p1 %{PATCH49}
-UpdateTimestamps -p1 %{PATCH50}
-UpdateTimestamps -p1 %{PATCH51}
-UpdateTimestamps -p1 %{PATCH52}
-UpdateTimestamps -p1 %{PATCH53}
-UpdateTimestamps -p1 %{PATCH54}
-UpdateTimestamps -p1 %{PATCH55}
-UpdateTimestamps -p1 %{PATCH56}
-UpdateTimestamps -p1 %{PATCH57}
-UpdateTimestamps -p1 %{PATCH58}
-UpdateTimestamps -p1 %{PATCH59}
-UpdateTimestamps -p1 %{PATCH60}
+UpdateTimestamps -p1 %{PATCH100}
+UpdateTimestamps -p1 %{PATCH101}
+UpdateTimestamps -p1 %{PATCH102}
 
 cp -f %SOURCE1 pcsd/public/images
-cp -f %SOURCE99 pcsd/public
 
 mkdir -p pcsd/.bundle
 cp -f %SOURCE2 pcsd/.bundle/config
 
 mkdir -p pcsd/vendor/cache
 #copy ruby gems
-cp -f %SOURCE3 pcsd/vendor/cache
-cp -f %SOURCE4 pcsd/vendor/cache
-cp -f %SOURCE5 pcsd/vendor/cache
-cp -f %SOURCE6 pcsd/vendor/cache
-cp -f %SOURCE7 pcsd/vendor/cache
-cp -f %SOURCE8 pcsd/vendor/cache
-cp -f %SOURCE9 pcsd/vendor/cache
-cp -f %SOURCE10 pcsd/vendor/cache
 cp -f %SOURCE11 pcsd/vendor/cache
 cp -f %SOURCE12 pcsd/vendor/cache
 cp -f %SOURCE13 pcsd/vendor/cache
 cp -f %SOURCE14 pcsd/vendor/cache
+cp -f %SOURCE15 pcsd/vendor/cache
+cp -f %SOURCE16 pcsd/vendor/cache
+cp -f %SOURCE17 pcsd/vendor/cache
+cp -f %SOURCE18 pcsd/vendor/cache
+cp -f %SOURCE19 pcsd/vendor/cache
+cp -f %SOURCE20 pcsd/vendor/cache
+cp -f %SOURCE21 pcsd/vendor/cache
+cp -f %SOURCE22 pcsd/vendor/cache
+cp -f %SOURCE23 pcsd/vendor/cache
 #ruby gems copied
 
 %build
@@ -252,9 +203,12 @@ make install_pcsd \
   includedir="%{_includedir}" \
   PREFIX=%{PCS_PREFIX}
 
+#after the ruby gem compilation we do not need ruby gems in the cache
+rm -r -v $RPM_BUILD_ROOT%{PCS_PREFIX}/lib/pcsd/vendor/cache
+
 %check
 run_all_tests(){
-  #prepare environmet for tests
+  #prepare environment for tests
   sitelib=$RPM_BUILD_ROOT%{python_sitelib}
   pcsd_dir=$RPM_BUILD_ROOT%{PCS_PREFIX}/lib/pcsd
 
@@ -263,30 +217,40 @@ run_all_tests(){
   #manually
   #we do not have permissions to write anywhere else than $RPM_BUILD_ROOT
   #so we must install python2-mock there
-  #disabled tests:
-  #pcs.test.test_lib_external.ParallelCommunicationHelperTest.test_success \
-  #   File "/builddir/build/BUILDROOT/pcs-0.9.152-5.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/test_lib_external.py", line 865, in test_success
-  #     func.assert_has_calls(expected_calls)...
-  # Expected: [call(0, a=0), call(1, a=2), call(2, a=4)]
-  # Actual: [call(1, a=2), call(0, a=0), call(2, a=4)]
+  #test fail info:
   #
-  #pcs.lib.booth.test.test_env.SetKeyfileAccessTest.test_set_desired_file_access \
-  # Traceback (most recent call last):
-  #   File "/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/lib/booth/test/test_env.py", line 148, in test_set_desired_file_access
-  #     env.set_keyfile_access(file_path)
-  #   File "/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/lib/booth/env.py", line 63, in set_keyfile_access
-  #     raise report_keyfile_io_error(file_path, "chown", e)
-  # LibraryError: ERROR FILE_IO_ERROR: {u'reason': u"Operation not permitted: '/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/resources/temp-keyfile'", u'file_role': u'BOOTH_KEY', u'file_path': u'/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/resources/temp-keyfile', u'operation': u'chown'}
+  # FAIL: test_base_create_with_agent_name_including_systemd_instance (pcs.test.cib_resource.test_create.Success)
+  #----------------------------------------------------------------------
+  #Traceback (most recent call last):
+  #  File "/builddir/build/BUILDROOT/pcs-0.9.156-1.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/cib_resource/test_create.py", line 41, in test_base_create_with_agent_name_including_systemd_instance
+  #    </resources>"""
+  #  File "/builddir/build/BUILDROOT/pcs-0.9.156-1.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/cib_resource/common.py", line 69, in assert_effect
+  #    self.assert_effect_single(alternative_list[-1], expected_xml, output)
+  #  File "/builddir/build/BUILDROOT/pcs-0.9.156-1.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/cib_resource/common.py", line 56, in assert_effect_single
+  #    self.assert_pcs_success(command, output)
+  #  File "/builddir/build/BUILDROOT/pcs-0.9.156-1.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/tools/assertions.py", line 51, in assert_pcs_success
+  #    stdout_start=stdout_start
+  #  File "/builddir/build/BUILDROOT/pcs-0.9.156-1.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/tools/assertions.py", line 115, in assert_pcs_result
+  #    stdout=stdout
+  #AssertionError: Stdout is not as expected
+  #command: resource create R systemd:lvm2-pvscan@252:2 --no-default-ops --force
+  #diff is (expected is 2nd):
+  #- Warning: Agent 'systemd:lvm2-pvscan@252:2' is not installed or does not provide valid metadata: error: crm_abort: systemd_unit_exec: Triggered fatal assert at systemd.c:676 : systemd_init()
+  #Full stdout:
+  #Warning: Agent 'systemd:lvm2-pvscan@252:2' is not installed or does not provide valid metadata: error: crm_abort: systemd_unit_exec: Triggered fatal assert at systemd.c:676 : systemd_init()
+  #----------------------------------------------------------------------
+  # REASON: crm_resource ends with an error
+  ## cat /etc/redhat-release
+  #Red Hat Enterprise Linux Server release 7.4 Beta (Maipo)
+  ## crm_resource --show-metadata systemd:nonexistent@some:thingxxx
+  #error: crm_abort:  systemd_unit_exec: Triggered fatal assert at systemd.c:676 : systemd_init()
 
   export PYTHONPATH="${PYTHONPATH}:${sitelib}"
-  easy_install -d ${sitelib} %SOURCE15
-  python ${sitelib}/pcs/test/suite.py -v --no-color --all-but \
-    pcs.test.test_resource.ResourceTest.testAddResources \
+  easy_install -d ${sitelib} %SOURCE31
+  python ${sitelib}/pcs/test/suite.py -v --vanilla --all-but \
     pcs.test.test_cluster.ClusterTest.testUIDGID \
-    pcs.test.test_utils.RunParallelTest.test_wait_for_slower_workers \
     pcs.test.test_stonith.StonithTest.test_stonith_create_provides_unfencing \
-    pcs.test.test_lib_external.ParallelCommunicationHelperTest.test_success \
-    pcs.lib.booth.test.test_env.SetKeyfileAccessTest.test_set_desired_file_access \
+    pcs.test.cib_resource.test_create.Success.test_base_create_with_agent_name_including_systemd_instance \
 
   test_result_python=$?
 
@@ -348,31 +312,69 @@ run_all_tests
 %ghost %config(noreplace) /var/lib/pcsd/pcs_users.conf
 %ghost %config(noreplace) /var/lib/pcsd/tokens
 %{_mandir}/man8/pcs.*
+%{_mandir}/man8/pcsd.*
 %exclude /usr/lib/pcsd/*.debian
 %exclude /usr/lib/pcsd/pcsd.service
 %exclude /usr/lib/pcsd/pcsd.conf
-%exclude %{python_sitelib}/pcs/bash_completion.sh
+%exclude %{python_sitelib}/pcs/bash_completion
 %exclude %{python_sitelib}/pcs/pcs.8
 %exclude %{python_sitelib}/pcs/pcs
 
-%doc COPYING README
+%doc COPYING
+%doc README
+%doc CHANGELOG.md
 
 %changelog
-* Thu Mar  2 2017 Johnny Hughes <johnny@centos.org> - 0.9.152-10.el7.centos.3
-- Roll in CentOS Branding (centos bug #9426)
-
-* Tue Feb 14 2017  Ivan Devat <idevat@redhat.com> - 0.9.152-10.el7_3.3
-- Provide a better error message in `pcs cluster cib-push` when the diff of the old and the new CIB is empty
-- Resolves: rhbz#1420757
-
-* Fri Feb 10 2017  Ivan Devat <idevat@redhat.com> - 0.9.152-10.el7_3.2
-- Fixed recognition of the parameter 'scope' in 'cluster cib push'
-- Resolves: rhbz#1420757
-
-* Mon Jan 16 2017  Ivan Devat <idevat@redhat.com> - 0.9.152-10.el7_3.1
-- Fixed resolving resource agent name containing systemd service instance
-- Added posibility to push only diff of cib in 'cluster cib push'
-- Resolves: rhbz#1408476 rhbz#1404233
+* Thu Jun 15 2017 Ivan Devat <idevat@redhat.com> - 0.9.158-6
+- It is now possible to disable, enable, unmanage and manage bundle resources and set their meta attributes
+- Fixed timeout when cluster setup takes long time in web UI
+- It is now mandatory to specify container type in the "resource bundle create" command
+- Resolves: rhbz#1447910 rhbz#1284404
+
+* Thu Jun 08 2017 Ivan Devat <idevat@redhat.com> - 0.9.158-5
+- `pcs cluster setup` uses existing pacemaker authkey if it exists
+- `pcs resource create` shows only warning when case of remote node is detected
+- Resolves: rhbz#1459503
+
+* Tue Jun 06 2017 Ivan Devat <idevat@redhat.com> - 0.9.158-4
+- Added support for enable and disable in bundles
+- New clusters are created with corosync encryption disabled by default
+- Flag `--master` is backward compatible in `pcs resource create`
+- Resolves: rhbz#1165821 rhbz#1433016 rhbz#1458153
+
+* Wed May 31 2017 Ivan Devat <idevat@redhat.com> - 0.9.158-3
+- Added option to create not hardened cluster with the `pcs cluster setup` command using the `--no-hardened` flag
+- Added option to create not hardened cluster from web UI
+- Fixed a crash in the `pcs cluster node add-remote` command when an id conflict occurs
+- Fixed creating a new cluster from the web UI
+- `pcs cluster node add-guest` now works with the flag `--skip-offline`
+- `pcs cluster node remove-guest` can be run again when the guest node was unreachable first time
+- Fixed "Error: Unable to read /etc/corosync/corosync.conf" when running `pcs resource create`([rhbz#1386114])
+- Binary data are stored in corosync authkey
+- Resolves: rhbz#1284404 rhbz#1373614 rhbz#1165821 rhbz#1176018 rhbz#1386114
+
+* Fri May 26 2017 Tomas Jelinek <tojeline@redhat.com> - 0.9.158-2
+- Fixed crash of the `pcs cluster setup` command when the `--force` flag was used
+- Fixed crash of the `pcs cluster destroy --all` command when the cluster was not running
+- Fixed crash of the `pcs config restore` command when restoring pacemaker authkey
+- Fixed "Error: unable to get cib" when adding a node to a stopped cluster
+- Resolves: rhbz#1176018
+
+* Tue May 23 2017 Ivan Devat <idevat@redhat.com> - 0.9.158-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1447702 rhbz#1176018 rhbz#1433016 rhbz#1303969 rhbz#1386114 rhbz#1386512 rhbz#1390609 rhbz#1165821 rhbz#1315992 rhbz#1373614 rhbz#1422667 rhbz#1254984
+
+* Mon Apr 10 2017 Ivan Devat <idevat@redhat.com> - 0.9.157-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1362493 rhbz#1315627 rhbz#1378742 rhbz#1334429 rhbz#1402374 rhbz#1389941 rhbz#1303969 rhbz#1415080 rhbz#1328882 rhbz#1434972 rhbz#1413958
+
+* Tue Feb 28 2017 Ivan Devat <idevat@redhat.com> - 0.9.156-2
+- Added ppc64le architecture
+- Resolves: rhbz#1402573
+
+* Fri Feb 10 2017 Ivan Devat <idevat@redhat.com> - 0.9.156-1
+- Rebased to latest upstream sources (see CHANGELOG.md)
+- Resolves: rhbz#1409821 rhbz#1404233 rhbz#1408476 rhbz#1262001 rhbz#1389443 rhbz#1389941 rhbz#1315992 rhbz#1261116 rhbz#1389501 rhbz#1404229 rhbz#1284404 rhbz#1339355 rhbz#1347335 rhbz#1344712 rhbz#1395226 rhbz#1382004 rhbz#1378107 rhbz#1398562 rhbz#1402475 rhbz#1382597 rhbz#1389453 rhbz#1390071 rhbz#1390066 rhbz#1387670 rhbz#1292858 rhbz#1396462 rhbz#1419903 rhbz#1419661
 
 * Tue Sep 20 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-10
 - Fixed error when stopping qdevice if is not running
@@ -382,7 +384,7 @@ run_all_tests
 - Resolves: rhbz#1158805 rhbz#1305049
 
 * Wed Sep 14 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-9
-- Added warning when stopping/destroying qdevice instance which is being used 
+- Added warning when stopping/destroying qdevice instance which is being used
 - Fixed removing qdevice from a cluster which uses sbd
 - Fixed re-running "pcs cluster node add" if it failed due to qdevice
 - Fixed documentation regarding booth
@@ -392,7 +394,7 @@ run_all_tests
 - Fixed setting utilization attributes in web UI
 - Fixed support for node utilization on remote node
 - Fixed updating of selected group when displaying new resource dialog
-- Fixed group list when managing cluster running older pcs in web UI 
+- Fixed group list when managing cluster running older pcs in web UI
 - Fixed displaying unmanaged status for resources for older pcs in web UI
 - Fixed clone/master/unclone group/ungroup buttons for older pcs in web UI
 - Fixed node standby/unstandby for older pcs in web UI
@@ -400,7 +402,7 @@ run_all_tests
 
 * Wed Aug 31 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-8
 - Fixed error message in node maintenance/unmaintenance commands
-- Fixed missing line at the end of booth config 
+- Fixed missing line at the end of booth config
 - Fixed documentation regarding booth
 - Fixed remove multiple booth resources with "--force" flag
 - Fixed cleanup of ip resource if it fails to create booth resource