diff --git a/.gitignore b/.gitignore index 09fb27e..f460c9e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,15 @@ SOURCES/HAM-logo.png -SOURCES/pcs-withgems-0.9.143.tar.gz +SOURCES/backports-3.6.8.gem +SOURCES/eventmachine-1.2.0.1.gem +SOURCES/mock-1.0.1.tar.gz +SOURCES/multi_json-1.12.0.gem +SOURCES/open4-1.3.4.gem +SOURCES/orderedhash-0.0.6.gem +SOURCES/pcs-0.9.152.tar.gz +SOURCES/rack-1.6.4.gem +SOURCES/rack-protection-1.5.3.gem +SOURCES/rack-test-0.6.3.gem +SOURCES/rpam-ruby19-1.2.1.gem +SOURCES/sinatra-1.4.7.gem +SOURCES/sinatra-contrib-1.4.7.gem +SOURCES/tilt-2.0.3.gem diff --git a/.pcs.metadata b/.pcs.metadata index 2a2ec1f..68b2d71 100644 --- a/.pcs.metadata +++ b/.pcs.metadata @@ -1,3 +1,15 @@ -9c06bb646aba6330d4d85fe08415cdd2276fe918 SOURCES/HAM-logo.png -062c9973625dced9a54a2f83a7baf7696ac37d60 SOURCES/favicon.ico -f4cfd8dd9ffdc4ce13a9b6946008ded2e1676709 SOURCES/pcs-withgems-0.9.143.tar.gz +80dc7788a3468fb7dd362a4b8bedd9efb373de89 SOURCES/HAM-logo.png +5c9dd0d5552d242ee6bb338a9097e85f0a0a45d5 SOURCES/backports-3.6.8.gem +60b6f1d8391cd374c6a2ef3977cb1397ed89055a SOURCES/eventmachine-1.2.0.1.gem +baa3446eb63557a24c4522dc5a61cfad082fa395 SOURCES/mock-1.0.1.tar.gz +46156f5a4ff17a23c15d0d2f0fc84cb5627ac70d SOURCES/multi_json-1.12.0.gem +41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4.gem +709cc95025009e5d221e37cb0777e98582146809 SOURCES/orderedhash-0.0.6.gem +2808df782cd1d269e1d94c36a52573023128c0a0 SOURCES/pcs-0.9.152.tar.gz +0a1eea6d7bb903d8c075688534480e87d4151470 SOURCES/rack-1.6.4.gem +1c28529c1d7376c61faed80f3d3297905a14c2b3 SOURCES/rack-protection-1.5.3.gem +6fd5a7f881a65ef93b66e21556ef67fbe08a2fcc SOURCES/rack-test-0.6.3.gem +a90e5a60d99445404a3c29a66d953a5e9918976d SOURCES/rpam-ruby19-1.2.1.gem +1c7f1ad8af670f4990373ebddb4d9fecd8f3c7d1 SOURCES/sinatra-1.4.7.gem +83742328f21b684d6ce6c4747710c6e975b608e7 SOURCES/sinatra-contrib-1.4.7.gem +49bee6e8614c1e991c1156150b0a2eaa28868f8d SOURCES/tilt-2.0.3.gem diff --git 
a/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch b/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch deleted file mode 100644 index e76425e..0000000 --- a/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch +++ /dev/null @@ -1,34 +0,0 @@ -From a4fa532d6c1091caf94d64c95c5625738aa1ebf3 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Wed, 12 Aug 2015 13:36:27 +0200 -Subject: [PATCH] fix resource relocation of globally-unique clones - ---- - pcs/test/test_utils.py | 57 +++++++++++++++++++++++++++++++++++++++++++++++--- - pcs/utils.py | 9 +++++++- - 2 files changed, 62 insertions(+), 4 deletions(-) - -diff --git a/pcs/utils.py b/pcs/utils.py -index d61ff44..740ff04 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -2044,8 +2044,15 @@ def get_resources_location_from_operations(cib_dom, resources_operations): - continue - long_id = res_op["long_id"] - if long_id not in locations: -+ # Move clone instances as if they were non-cloned resources, it -+ # really works with current pacemaker (1.1.13-6). Otherwise there -+ # is probably no way to move them other then setting their -+ # stickiness to 0. 
-+ res_id = res_op["id"] -+ if ":" in res_id: -+ res_id = res_id.split(":")[0] - id_for_constraint = validate_constraint_resource( -- cib_dom, res_op["id"] -+ cib_dom, res_id - )[2] - if not id_for_constraint: - continue --- -1.9.1 - diff --git a/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch b/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch deleted file mode 100644 index adca66c..0000000 --- a/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 5f6b6c657f2a88985baf02d24a2de8dafa8ec736 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Tue, 25 Aug 2015 13:08:46 +0200 -Subject: [PATCH] fix resource relocate for remote nodes - ---- - pcs/test/test_utils.py | 69 +++++++++++++++++++++++++++ - pcs/test/transitions02.xml | 116 +++++++++++++++++++++++++++++++++++++++++++++ - pcs/utils.py | 8 ++-- - 3 files changed, 190 insertions(+), 3 deletions(-) - create mode 100644 pcs/test/transitions02.xml - -diff --git a/pcs/utils.py b/pcs/utils.py -index 740ff04..cd33a27 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -2014,7 +2014,9 @@ def simulate_cib(cib_dom): - - def get_operations_from_transitions(transitions_dom): - operation_list = [] -- watched_operations = ("start", "stop", "promote", "demote") -+ watched_operations = ( -+ "start", "stop", "promote", "demote", "migrate_from", "migrate_to" -+ ) - for rsc_op in transitions_dom.getElementsByTagName("rsc_op"): - primitives = rsc_op.getElementsByTagName("primitive") - if not primitives: -@@ -2040,7 +2042,7 @@ def get_resources_location_from_operations(cib_dom, resources_operations): - locations = {} - for res_op in resources_operations: - operation = res_op["operation"] -- if operation not in ("start", "promote"): -+ if operation not in ("start", "promote", "migrate_from"): - continue - long_id = res_op["long_id"] - if long_id not in locations: -@@ -2061,7 +2063,7 @@ def 
get_resources_location_from_operations(cib_dom, resources_operations): - "long_id": long_id, - "id_for_constraint": id_for_constraint, - } -- if operation == "start": -+ if operation in ("start", "migrate_from"): - locations[long_id]["start_on_node"] = res_op["on_node"] - if operation == "promote": - locations[long_id]["promote_on_node"] = res_op["on_node"] --- -1.9.1 - diff --git a/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch b/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch new file mode 100644 index 0000000..4907fad --- /dev/null +++ b/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch @@ -0,0 +1,357 @@ +From 1b6ed4d97198e7ca8c1fd5f76bfb8bfc95eeabdc Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Wed, 14 Sep 2016 09:37:06 +0200 +Subject: [PATCH] squash bz1158500 add support for utilization attri + +4ab84628f802 fix parsing of utilization attributes + +18d526f59679 support utilization on (non-cib) remote node + +f0b193a681e3 show error when show utilizat. on nonexistent node + +9907c123c225 web UI: fix setting utilization attributes +--- + .pylintrc | 2 +- + pcs/node.py | 54 ++++++++++++++++++++++++++++++++++++----- + pcs/resource.py | 8 ++++--- + pcs/test/test_node.py | 56 +++++++++++++++++++++++++++++++++++++++++++ + pcs/test/test_resource.py | 18 ++++++++++++++ + pcs/test/test_utils.py | 17 +++++++++---- + pcs/utils.py | 12 +++++++++- + pcsd/public/js/nodes-ember.js | 4 ++-- + pcsd/remote.rb | 2 +- + 9 files changed, 155 insertions(+), 18 deletions(-) + +diff --git a/.pylintrc b/.pylintrc +index 1dd6d5d..6101381 100644 +--- a/.pylintrc ++++ b/.pylintrc +@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy + + [FORMAT] + # Maximum number of lines in a module +-max-module-lines=4584 ++max-module-lines=4616 + # Maximum number of characters on a single line. 
+ max-line-length=1291 + +diff --git a/pcs/node.py b/pcs/node.py +index ed77d5d..729ea35 100644 +--- a/pcs/node.py ++++ b/pcs/node.py +@@ -56,7 +56,10 @@ def node_cmd(argv): + elif len(argv) == 1: + print_node_utilization(argv.pop(0), filter_name=filter_name) + else: +- set_node_utilization(argv.pop(0), argv) ++ try: ++ set_node_utilization(argv.pop(0), argv) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "node", "utilization") + # pcs-to-pcsd use only + elif sub_cmd == "pacemaker-status": + node_pacemaker_status() +@@ -150,17 +153,56 @@ def set_node_utilization(node, argv): + cib = utils.get_cib_dom() + node_el = utils.dom_get_node(cib, node) + if node_el is None: +- utils.err("Unable to find a node: {0}".format(node)) ++ if utils.usefile: ++ utils.err("Unable to find a node: {0}".format(node)) + +- utils.dom_update_utilization( +- node_el, utils.convert_args_to_tuples(argv), "nodes-" +- ) ++ for attrs in utils.getNodeAttributesFromPacemaker(): ++ if attrs.name == node and attrs.type == "remote": ++ node_attrs = attrs ++ break ++ else: ++ utils.err("Unable to find a node: {0}".format(node)) ++ ++ nodes_section_list = cib.getElementsByTagName("nodes") ++ if len(nodes_section_list) == 0: ++ utils.err("Unable to get nodes section of cib") ++ ++ dom = nodes_section_list[0].ownerDocument ++ node_el = dom.createElement("node") ++ node_el.setAttribute("id", node_attrs.id) ++ node_el.setAttribute("type", node_attrs.type) ++ node_el.setAttribute("uname", node_attrs.name) ++ nodes_section_list[0].appendChild(node_el) ++ ++ utils.dom_update_utilization(node_el, prepare_options(argv), "nodes-") + utils.replace_cib_configuration(cib) + + def print_node_utilization(filter_node=None, filter_name=None): + cib = utils.get_cib_dom() ++ ++ node_element_list = cib.getElementsByTagName("node") ++ ++ ++ if( ++ filter_node ++ and ++ filter_node not in [ ++ node_element.getAttribute("uname") ++ for node_element in node_element_list ++ ] ++ and ( ++ 
utils.usefile ++ or ++ filter_node not in [ ++ node_attrs.name for node_attrs ++ in utils.getNodeAttributesFromPacemaker() ++ ] ++ ) ++ ): ++ utils.err("Unable to find a node: {0}".format(filter_node)) ++ + utilization = {} +- for node_el in cib.getElementsByTagName("node"): ++ for node_el in node_element_list: + node = node_el.getAttribute("uname") + if filter_node is not None and node != filter_node: + continue +diff --git a/pcs/resource.py b/pcs/resource.py +index 74adac6..046a826 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -191,7 +191,10 @@ def resource_cmd(argv): + elif len(argv) == 1: + print_resource_utilization(argv.pop(0)) + else: +- set_resource_utilization(argv.pop(0), argv) ++ try: ++ set_resource_utilization(argv.pop(0), argv) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "resource", "utilization") + elif (sub_cmd == "get_resource_agent_info"): + get_resource_agent_info(argv) + else: +@@ -2795,8 +2798,7 @@ def set_resource_utilization(resource_id, argv): + resource_el = utils.dom_get_resource(cib, resource_id) + if resource_el is None: + utils.err("Unable to find a resource: {0}".format(resource_id)) +- +- utils.dom_update_utilization(resource_el, utils.convert_args_to_tuples(argv)) ++ utils.dom_update_utilization(resource_el, prepare_options(argv)) + utils.replace_cib_configuration(cib) + + def print_resource_utilization(resource_id): +diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py +index 9b45e07..137c7c7 100644 +--- a/pcs/test/test_node.py ++++ b/pcs/test/test_node.py +@@ -7,7 +7,9 @@ from __future__ import ( + + import shutil + from pcs.test.tools import pcs_unittest as unittest ++from pcs.test.tools.pcs_unittest import mock + ++from pcs import node + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( + ac, +@@ -268,6 +270,20 @@ Node Utilization: + self.assertEqual(0, returnVal) + + def test_node_utilization_set_invalid(self): ++ output, returnVal = 
pcs(temp_cib, "node utilization rh7-1 test") ++ expected_out = """\ ++Error: missing value of 'test' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ ++ output, returnVal = pcs(temp_cib, "node utilization rh7-1 =10") ++ expected_out = """\ ++Error: missing key in '=10' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ + output, returnVal = pcs(temp_cib, "node utilization rh7-0 test=10") + expected_out = """\ + Error: Unable to find a node: rh7-0 +@@ -524,3 +540,43 @@ Node Attributes: + "node attribute rh7-1 missing= --force", + "" + ) ++ ++class SetNodeUtilizationTest(unittest.TestCase, AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(empty_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ def test_refuse_non_option_attribute_parameter_among_options(self): ++ self.assert_pcs_fail("node utilization rh7-1 net", [ ++ "Error: missing value of 'net' option", ++ ]) ++ ++ def test_refuse_option_without_key(self): ++ self.assert_pcs_fail("node utilization rh7-1 =1", [ ++ "Error: missing key in '=1' option", ++ ]) ++ ++class PrintNodeUtilizationTest(unittest.TestCase, AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(empty_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ @mock.patch("pcs.node.utils") ++ def test_refuse_when_node_not_in_cib_and_is_not_remote(self, mock_utils): ++ mock_cib = mock.MagicMock() ++ mock_cib.getElementsByTagName = mock.Mock(return_value=[]) ++ ++ mock_utils.get_cib_dom = mock.Mock(return_value=mock_cib) ++ mock_utils.usefile = False ++ mock_utils.getNodeAttributesFromPacemaker = mock.Mock(return_value=[]) ++ mock_utils.err = mock.Mock(side_effect=SystemExit) ++ ++ self.assertRaises( ++ SystemExit, ++ lambda: node.print_node_utilization("some") ++ ) ++ ++ def test_refuse_when_node_not_in_mocked_cib(self): ++ self.assert_pcs_fail("node utilization some_nonexistent_node", [ ++ "Error: Unable to find a node: some_nonexistent_node", ++ ]) +diff --git 
a/pcs/test/test_resource.py b/pcs/test/test_resource.py +index 87a7fa8..d32cfb4 100644 +--- a/pcs/test/test_resource.py ++++ b/pcs/test/test_resource.py +@@ -4430,6 +4430,24 @@ Resource Utilization: + self.assertEqual(0, returnVal) + + def test_resource_utilization_set_invalid(self): ++ output, returnVal = pcs( ++ temp_large_cib, "resource utilization dummy test" ++ ) ++ expected_out = """\ ++Error: missing value of 'test' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ ++ output, returnVal = pcs( ++ temp_large_cib, "resource utilization dummy =10" ++ ) ++ expected_out = """\ ++Error: missing key in '=10' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ + output, returnVal = pcs(temp_large_cib, "resource utilization dummy0") + expected_out = """\ + Error: Unable to find a resource: dummy0 +diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py +index 252de30..c4c6d87 100644 +--- a/pcs/test/test_utils.py ++++ b/pcs/test/test_utils.py +@@ -1400,12 +1400,12 @@ class UtilsTest(unittest.TestCase): + """).documentElement + self.assertRaises( + SystemExit, +- utils.dom_update_utilization, el, [("name", "invalid_val")] ++ utils.dom_update_utilization, el, {"name": "invalid_val"} + ) + + self.assertRaises( + SystemExit, +- utils.dom_update_utilization, el, [("name", "0.01")] ++ utils.dom_update_utilization, el, {"name": "0.01"} + ) + + sys.stderr = tmp_stderr +@@ -1415,7 +1415,12 @@ class UtilsTest(unittest.TestCase): + <resource id="test_id"/> + """).documentElement + utils.dom_update_utilization( +- el, [("name", ""), ("key", "-1"), ("keys", "90")] ++ el, ++ { ++ "name": "", ++ "key": "-1", ++ "keys": "90", ++ } + ) + + self.assertEqual(len(dom_get_child_elements(el)), 1) +@@ -1459,7 +1464,11 @@ class UtilsTest(unittest.TestCase): + </resource> + """).documentElement + utils.dom_update_utilization( +- el, [("key", "100"), ("keys", "")] ++ el, ++ { ++ "key": "100", ++ "keys": "", ++ } + ) + + u = 
dom_get_child_elements(el)[0] +diff --git a/pcs/utils.py b/pcs/utils.py +index a7ff7ca..d5b6dcf 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -472,6 +472,16 @@ def getNodesFromPacemaker(): + except LibraryError as e: + process_library_reports(e.args) + ++def getNodeAttributesFromPacemaker(): ++ try: ++ return [ ++ node.attrs ++ for node in ClusterState(getClusterStateXml()).node_section.nodes ++ ] ++ except LibraryError as e: ++ process_library_reports(e.args) ++ ++ + def hasCorosyncConf(conf=None): + if not conf: + if is_rhel6(): +@@ -2487,7 +2497,7 @@ def dom_update_utilization(dom_element, attributes, id_prefix=""): + id_prefix + dom_element.getAttribute("id") + "-utilization" + ) + +- for name, value in attributes: ++ for name, value in sorted(attributes.items()): + if value != "" and not is_int(value): + err( + "Value of utilization attribute must be integer: " +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index c650fe6..19caf14 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -500,9 +500,9 @@ Pcs.UtilizationTableComponent = Ember.Component.extend({ + }, + add: function(form_id) { + var id = "#" + form_id; +- var name = $(id + " input[name='new_utilization_name']").val(); ++ var name = $(id + " input[name='new_utilization_name']").val().trim(); + if (name == "") { +- return; ++ alert("Name of utilization attribute should be non-empty string."); + } + var value = $(id + " input[name='new_utilization_value']").val().trim(); + if (!is_integer(value)) { +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index e467d0a..7dc7951 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -2240,7 +2240,7 @@ def set_node_utilization(params, reqest, auth_user) + + if retval != 0 + return [400, "Unable to set utilization '#{name}=#{value}' for node " + +- "'#{res_id}': #{stderr.join('')}" ++ "'#{node}': #{stderr.join('')}" + ] + end + return 200 +-- +1.8.3.1 + diff --git 
a/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch b/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch deleted file mode 100644 index baded2f..0000000 --- a/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch +++ /dev/null @@ -1,396 +0,0 @@ -From ef01aa872871b8e1ea79058cbe3301ce878dde9a Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Tue, 25 Aug 2015 11:44:00 +0200 -Subject: [PATCH] fix dashboard in web UI - ---- - pcsd/cluster_entity.rb | 53 +++++++++++++++++++++++++++++----------- - pcsd/pcs.rb | 14 ++++++++--- - pcsd/public/js/nodes-ember.js | 17 ++++++++++--- - pcsd/public/js/pcsd.js | 38 ++++++++++++++-------------- - pcsd/remote.rb | 22 +++++++++++++++-- - pcsd/test/test_cluster_entity.rb | 4 +-- - pcsd/views/_resource.erb | 20 +++++++-------- - pcsd/views/main.erb | 4 +++ - 8 files changed, 117 insertions(+), 55 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index b291937..78bc5ab 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -112,6 +112,9 @@ module ClusterEntity - status.node = node - primitive.crm_status << status - } -+ primitives.each {|_, resource| -+ resource[0].update_status -+ } - return primitives - end - -@@ -178,6 +181,9 @@ module ClusterEntity - end - end - } -+ tree.each {|resource| -+ resource.update_status -+ } - return tree - end - -@@ -491,23 +497,27 @@ module ClusterEntity - end - end - -+ def update_status -+ @status = get_status -+ end -+ - def get_status -- count = @crm_status.length - running = 0 -+ failed = 0 - @crm_status.each do |s| -- if ['Started', 'Master', 'Slave'].include?(s.role) -+ if s.active - running += 1 -+ elsif s.failed -+ failed += 1 - end - end - - if disabled? 
- status = ClusterEntity::ResourceStatus.new(:disabled) -- elsif running != 0 -- if running == count -- status = ClusterEntity::ResourceStatus.new(:running) -- else -- status = ClusterEntity::ResourceStatus.new(:partially_running) -- end -+ elsif running > 0 -+ status = ClusterEntity::ResourceStatus.new(:running) -+ elsif failed > 0 -+ status = ClusterEntity::ResourceStatus.new(:failed) - else - status = ClusterEntity::ResourceStatus.new(:blocked) - end -@@ -655,6 +665,14 @@ module ClusterEntity - end - end - -+ def update_status -+ @status = ClusterEntity::ResourceStatus.new(:running) -+ @members.each { |p| -+ p.update_status -+ @status = p.status if @status < p.status -+ } -+ end -+ - def to_status(version='1') - if version == '2' - hash = super(version) -@@ -730,6 +748,13 @@ module ClusterEntity - end - end - -+ def update_status -+ if @member -+ @member.update_status -+ @status = @member.status -+ end -+ end -+ - def to_status(version='1') - if version == '2' - hash = super(version) -@@ -794,13 +819,13 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- end -- if @masters.empty? -- @error_list << { -- :message => 'Resource is master/slave but has not been promoted '\ -+ if @masters.empty? 
-+ @error_list << { -+ :message => 'Resource is master/slave but has not been promoted '\ - + 'to master on any node.', -- :type => 'no_master' -- } -+ :type => 'no_master' -+ } -+ end - end - end - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 1fe9b99..cc5b038 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -1506,10 +1506,18 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - status = overview.update(cluster_nodes_map[quorate_nodes[0]]) - status[:quorate] = true - status[:node_list] = node_status_list -- # if we don't have quorum, use data from any node -- # no node has quorum, so no node has any info about the cluster -+ # if we don't have quorum, use data from any online node, -+ # otherwise use data from any node no node has quorum, so no node has any -+ # info about the cluster - elsif not old_status -- status = overview.update(cluster_nodes_map.values[0]) -+ node_to_use = cluster_nodes_map.values[0] -+ cluster_nodes_map.each { |_, node_data| -+ if node_data[:node] and node_data[:node][:status] == 'online' -+ node_to_use = node_data -+ break -+ end -+ } -+ status = overview.update(node_to_use) - status[:quorate] = false - status[:node_list] = node_status_list - # old pcsd doesn't provide info about quorum, use data from any node -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 1f60adc..172c00a 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -54,7 +54,8 @@ Pcs = Ember.Application.createWithMixins({ - if (window.location.pathname.lastIndexOf('/manage', 0) !== 0) { - return; - } -- clearTimeout(Pcs.update_timeout); -+ clearTimeout(Pcs.get('update_timeout')); -+ Pcs.set('update_timeout', null); - var self = Pcs; - var cluster_name = self.cluster_name; - if (cluster_name == null) { -@@ -77,7 +78,7 @@ Pcs = Ember.Application.createWithMixins({ - if (data["not_current_data"]) { - self.update(); - } else { -- Pcs.update_timeout = window.setTimeout(self.update, 20000); -+ 
Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - } - hide_loading_screen(); - }, -@@ -92,7 +93,7 @@ Pcs = Ember.Application.createWithMixins({ - console.log("Error: Unable to parse json for clusters_overview"); - } - } -- Pcs.update_timeout = window.setTimeout(self.update,20000); -+ Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - hide_loading_screen(); - } - }); -@@ -126,6 +127,7 @@ Pcs = Ember.Application.createWithMixins({ - var cur_resource = self.get('cur_resource'); - var resource_map = self.get('resource_map'); - if (first_run) { -+ setup_node_links(); - Pcs.nodesController.load_node($('#node_list_row').find('.node_selected').first(),true); - Pcs.aclsController.load_role($('#acls_list_row').find('.node_selected').first(), true); - if (self.get("fence_id_to_load")) { -@@ -173,7 +175,6 @@ Pcs = Ember.Application.createWithMixins({ - if (!resource_change && self.get('cur_resource')) - tree_view_select(self.get('cur_resource').get('id')); - Pcs.selectedNodeController.reset(); -- setup_node_links(); - disable_checkbox_clicks(); - }); - }); -@@ -207,6 +208,7 @@ Pcs.resourcesContainer = Ember.Object.create({ - cur_fence: null, - constraints: {}, - group_list: [], -+ data_version: null, - - get_resource_by_id: function(resource_id) { - var resource_map = this.get('resource_map'); -@@ -434,6 +436,7 @@ Pcs.resourcesContainer = Ember.Object.create({ - update: function(data) { - var self = this; - self.set('group_list', data['groups']); -+ self.set("data_version", data['status_version']); - var resources = data["resource_list"]; - var resource_obj = null; - var resource_id; -@@ -495,6 +498,12 @@ Pcs.resourcesContainer = Ember.Object.create({ - } - }); - -+Pcs.resourcesContainer.reopen({ -+ is_version_1: function() { -+ return (this.get("data_version") == '1'); -+ }.property('data_version') -+}); -+ - Pcs.ResourceObj = Ember.Object.extend({ - id: null, - _id: Ember.computed.alias('id'), -diff --git a/pcsd/public/js/pcsd.js 
b/pcsd/public/js/pcsd.js -index 9891aa8..2c71e6b 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -1242,26 +1242,24 @@ function destroy_tooltips() { - } - - function remove_cluster(ids) { -- for (var i=0; i<ids.length; i++) { -- var cluster = ids[i]; -- var clusterid_name = "clusterid-"+ids[i]; -- var data = {} -- data[clusterid_name] = true; -- $.ajax({ -- type: 'POST', -- url: '/manage/removecluster', -- data: data, -- timeout: pcs_timeout, -- success: function () { -- $("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")}); -- location.reload(); -- }, -- error: function (xhr, status, error) { -- alert("Unable to remove cluster: " + res + " ("+error+")"); -- $("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")}); -- } -- }); -- } -+ var data = {}; -+ $.each(ids, function(_, cluster) { -+ data[ "clusterid-" + cluster] = true; -+ }); -+ $.ajax({ -+ type: 'POST', -+ url: '/manage/removecluster', -+ data: data, -+ timeout: pcs_timeout, -+ success: function () { -+ $("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")}); -+ location.reload(); -+ }, -+ error: function (xhr, status, error) { -+ alert("Unable to remove cluster: " + res + " ("+error+")"); -+ $("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")}); -+ } -+ }); - } - - function remove_nodes(ids, force) { -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index a40c1c7..06947ec 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -2,6 +2,7 @@ require 'json' - require 'uri' - require 'open4' - require 'set' -+require 'timeout' - - require 'pcs.rb' - require 'resource.rb' -@@ -1120,6 +1121,16 @@ def clusters_overview(params, request, session) - config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text()) - config.clusters.each { |cluster| - threads << Thread.new { -+ 
cluster_map[cluster.name] = { -+ 'cluster_name' => cluster.name, -+ 'error_list' => [ -+ {'message' => 'Unable to connect to the cluster. Request timeout.'} -+ ], -+ 'warning_list' => [], -+ 'status' => 'unknown', -+ 'node_list' => get_default_overview_node_list(cluster.name), -+ 'resource_list' => [] -+ } - overview_cluster = nil - online, offline, not_authorized_nodes = check_gui_status_of_nodes( - session, -@@ -1134,7 +1145,7 @@ def clusters_overview(params, request, session) - nodes_not_in_cluster = [] - for node in cluster_nodes_auth - code, response = send_request_with_token( -- session, node, 'cluster_status', true, {}, true, nil, 15 -+ session, node, 'cluster_status', true, {}, true, nil, 8 - ) - if code == 404 - not_supported = true -@@ -1228,7 +1239,14 @@ def clusters_overview(params, request, session) - cluster_map[cluster.name] = overview_cluster - } - } -- threads.each { |t| t.join } -+ -+ begin -+ Timeout::timeout(18) { -+ threads.each { |t| t.join } -+ } -+ rescue Timeout::Error -+ threads.each { |t| t.exit } -+ end - - # update clusters in PCSConfig - not_current_data = false -diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb -index 862b648..cc4c06e 100644 ---- a/pcsd/views/_resource.erb -+++ b/pcsd/views/_resource.erb -@@ -32,16 +32,16 @@ - <td id="<%=@myView%>_list" class="node_list"> - <%= erb :_resource_list %> - </td> -- <td id="node_info" colspan=2> -- <div id="<%=@myView%>_info_div"> -- <% if @myView == "resource" %> -- {{resource-edit resource=Pcs.resourcesContainer.cur_resource page_name="Resource"}} -- <% else %> -- {{resource-edit resource=Pcs.resourcesContainer.cur_fence page_name="Fence device" stonith=1}} -- <% end %> -- </div> -- </td> -- </tr> -+ <td id="node_info" colspan=2> -+ <div id="<%=@myView%>_info_div"> -+ <% if @myView == "resource" %> -+ {{resource-edit resource=Pcs.resourcesContainer.cur_resource page_name="Resource" old_pcsd=Pcs.resourcesContainer.is_version_1}} -+ <% else %> -+ {{resource-edit 
resource=Pcs.resourcesContainer.cur_fence page_name="Fence device" stonith=1 old_pcsd=Pcs.resourcesContainer.is_version_1}} -+ <% end %> -+ </div> -+ </td> -+ </tr> - <% if @myView == "resource" %> - </div> - </table> -diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb -index 3c1e0cd..bb4e989 100644 ---- a/pcsd/views/main.erb -+++ b/pcsd/views/main.erb -@@ -197,6 +197,7 @@ - <td class="bold" nowrap>Current Location:</td> - <td id="cur_res_loc" class="reg">{{resource.nodes_running_on_string}}</td> - </tr> -+ {{#unless old_pcsd}} - {{#unless resource.parent}} - <tr> - <td class="bold" nowrap>Clone:</td> -@@ -226,8 +227,10 @@ - </tr> - {{/if}} - {{/unless}} -+ {{/unless}} - {{/if}} - {{/unless}} -+ {{#unless old_pcsd}} - {{#if resource.is_group}} - {{#unless resource.parent}} - <tr> -@@ -258,6 +261,7 @@ - </td> - </tr> - {{/if}} -+ {{/unless}} - </table> - {{#unless resource.stonith}} - {{location_constraints-table constraints=resource.location_constraints}} --- -1.9.1 - diff --git a/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch b/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch deleted file mode 100644 index f901308..0000000 --- a/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch +++ /dev/null @@ -1,143 +0,0 @@ -From f55ca2f12c4552fcd516737fa797cf806aa70705 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Thu, 3 Sep 2015 12:29:37 +0200 -Subject: [PATCH] fix loading cluster status for web UI - ---- - pcs/status.py | 37 ++++++++++++++++++++++++++++++++++--- - pcsd/cluster_entity.rb | 25 ++++++++++++++++++++++--- - pcsd/pcs.rb | 3 +++ - 3 files changed, 59 insertions(+), 6 deletions(-) - -diff --git a/pcs/status.py b/pcs/status.py -index eb2a5eb..34354ef 100644 ---- a/pcs/status.py -+++ b/pcs/status.py -@@ -123,14 +123,28 @@ def nodes_status(argv): - onlinenodes = [] - offlinenodes = [] - standbynodes = [] -+ remote_onlinenodes = [] -+ remote_offlinenodes = [] -+ remote_standbynodes = [] - for 
node in nodes[0].getElementsByTagName("node"): -+ node_name = node.getAttribute("name") -+ node_remote = node.getAttribute("type") == "remote" - if node.getAttribute("online") == "true": - if node.getAttribute("standby") == "true": -- standbynodes.append(node.getAttribute("name")) -+ if node_remote: -+ remote_standbynodes.append(node_name) -+ else: -+ standbynodes.append(node_name) - else: -- onlinenodes.append(node.getAttribute("name")) -+ if node_remote: -+ remote_onlinenodes.append(node_name) -+ else: -+ onlinenodes.append(node_name) - else: -- offlinenodes.append(node.getAttribute("name")) -+ if node_remote: -+ remote_offlinenodes.append(node_name) -+ else: -+ offlinenodes.append(node_name) - - print "Pacemaker Nodes:" - -@@ -149,6 +163,23 @@ def nodes_status(argv): - print node, - print "" - -+ print "Pacemaker Remote Nodes:" -+ -+ print " Online:", -+ for node in remote_onlinenodes: -+ print node, -+ print "" -+ -+ print " Standby:", -+ for node in remote_standbynodes: -+ print node, -+ print "" -+ -+ print " Offline:", -+ for node in remote_offlinenodes: -+ print node, -+ print "" -+ - # TODO: Remove, currently unused, we use status from the resource.py - def resources_status(argv): - info_dom = utils.getClusterState() -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 78bc5ab..4f751b8 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -533,7 +533,8 @@ module ClusterEntity - @operations = [] - failed_ops = [] - message_list = [] -- cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op") { |e| -+ cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op | "\ -+ + "//lrm_resource[starts-with(@id, \"#{@id}:\")]/lrm_rsc_op") { |e| - operation = ResourceOperation.new(e) - @operations << operation - if operation.rc_code != 0 -@@ -819,13 +820,15 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if @masters.empty? 
-- @error_list << { -+ if @masters.empty? and !disabled? -+ @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ @warning_list << { - :message => 'Resource is master/slave but has not been promoted '\ - + 'to master on any node.', - :type => 'no_master' - } - end -+ @status = @member.status if @status < @member.status - end - end - -@@ -851,6 +854,22 @@ module ClusterEntity - end - end - -+ def update_status -+ if @member -+ @member.update_status -+ if @member.instance_of?(Primitive) -+ primitive_list = [@member] -+ else -+ primitive_list = @member.members -+ end -+ @masters, @slaves = get_masters_slaves(primitive_list) -+ if @masters.empty? and !disabled? -+ @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ end -+ @status = @member.status if @status < @member.status -+ end -+ end -+ - private - def get_masters_slaves(primitive_list) - masters = [] -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index cc5b038..87404ac 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -568,6 +568,9 @@ def get_nodes_status() - if l.start_with?("Pacemaker Nodes:") - in_pacemaker = true - end -+ if l.start_with?("Pacemaker Remote Nodes:") -+ break -+ end - if l.end_with?(":") - next - end --- -1.9.1 - diff --git a/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch b/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch deleted file mode 100644 index 56bac08..0000000 --- a/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch +++ /dev/null @@ -1,1223 +0,0 @@ -From 9830bad113bf07fb65af18e2f2423c27da0180c0 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Tue, 8 Sep 2015 12:46:50 +0200 -Subject: [PATCH] web UI: multiple fixes in the dashboard - -- fix no quorum message -- fix status inconsistency of offline cluster -- fix status icons -- cluster status is 'failed' if there is resource with status 'blocked' -- fix random unselecting of current cluster -- performance improvements in loading cluster status -- 
removed icon that indicates issue in cluster -- changed status detection of resources ---- - pcsd/cluster_entity.rb | 150 +++++++++++++++-------- - pcsd/pcs.rb | 231 +++++++++++++++++------------------ - pcsd/public/js/nodes-ember.js | 122 +++++++++---------- - pcsd/public/js/pcsd.js | 24 +++- - pcsd/test/test_all_suite.rb | 1 + - pcsd/test/test_cluster_entity.rb | 126 +++++++++++++++---- - pcsd/test/test_pcs.rb | 257 +++++++++++++++++++++++++++++++++++++++ - pcsd/views/_cluster_list.erb | 6 +- - pcsd/views/main.erb | 2 +- - pcsd/views/manage.erb | 243 ++++++++++++++++++------------------ - 10 files changed, 779 insertions(+), 383 deletions(-) - create mode 100644 pcsd/test/test_pcs.rb - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 4f751b8..b5d2719 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -3,6 +3,34 @@ require 'pcs.rb' - - module ClusterEntity - -+ def self.get_rsc_status(crm_dom) -+ unless crm_dom -+ return {} -+ end -+ status = {} -+ crm_dom.elements.each('/crm_mon/resources//resource') { |e| -+ rsc_id = e.attributes['id'].split(':')[0] -+ status[rsc_id] ||= [] -+ status[rsc_id] << ClusterEntity::CRMResourceStatus.new(e) -+ } -+ return status -+ end -+ -+ def self.get_resources_operations(cib_dom) -+ unless cib_dom -+ return {} -+ end -+ operations = {} -+ cib_dom.elements.each( -+ '/cib/status/node_state/lrm/lrm_resources/lrm_resource/lrm_rsc_op' -+ ) { |e| -+ rsc_id = e.parent.attributes['id'].split(':')[0] -+ operations[rsc_id] ||= [] -+ operations[rsc_id] << ClusterEntity::ResourceOperation.new(e) -+ } -+ return operations -+ end -+ - def self.obj_to_hash(obj, variables=nil) - unless variables - variables = obj.instance_variables -@@ -454,8 +482,9 @@ module ClusterEntity - attr_accessor :agentname, :_class, :provider, :type, :stonith, - :instance_attr, :crm_status, :operations - -- def initialize(primitive_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(primitive_cib_element, crm_dom, 
parent) -+ def initialize(primitive_cib_element=nil, rsc_status=nil, parent=nil, -+ operations=nil) -+ super(primitive_cib_element, nil, parent) - @class_type = 'primitive' - @agentname = nil - @_class = nil -@@ -482,18 +511,12 @@ module ClusterEntity - ) - } - @stonith = @_class == 'stonith' -- if @id and crm_dom -- crm_dom.elements.each("//resource[starts-with(@id, \"#{@id}:\")] | "\ -- + "//resource[@id=\"#{@id}\"]") { |e| -- @crm_status << CRMResourceStatus.new(e) -- } -+ if @id and rsc_status -+ @crm_status = rsc_status[@id] || [] - end - - @status = get_status -- -- if cib_dom -- load_operations(cib_dom) -- end -+ load_operations(operations) - end - end - -@@ -525,28 +548,26 @@ module ClusterEntity - return status - end - -- def load_operations(cib_dom) -- unless @id -+ def load_operations(operations) -+ @operations = [] -+ unless operations and @id and operations[@id] - return - end - -- @operations = [] - failed_ops = [] - message_list = [] -- cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op | "\ -- + "//lrm_resource[starts-with(@id, \"#{@id}:\")]/lrm_rsc_op") { |e| -- operation = ResourceOperation.new(e) -- @operations << operation -- if operation.rc_code != 0 -+ operations[@id].each { |o| -+ @operations << o -+ if o.rc_code != 0 - # 7 == OCF_NOT_RUNNING == The resource is safely stopped. -- next if operation.operation == 'monitor' and operation.rc_code == 7 -+ next if o.operation == 'monitor' and o.rc_code == 7 - # 8 == OCF_RUNNING_MASTER == The resource is running in master mode. 
-- next if 8 == operation.rc_code -- failed_ops << operation -- message = "Failed to #{operation.operation} #{@id}" -- message += " on #{Time.at(operation.last_rc_change).asctime}" -- message += " on node #{operation.on_node}" if operation.on_node -- message += ": #{operation.exit_reason}" if operation.exit_reason -+ next if 8 == o.rc_code -+ failed_ops << o -+ message = "Failed to #{o.operation} #{@id}" -+ message += " on #{Time.at(o.last_rc_change).asctime}" -+ message += " on node #{o.on_node}" if o.on_node -+ message += ": #{o.exit_reason}" if o.exit_reason - message_list << { - :message => message - } -@@ -652,26 +673,48 @@ module ClusterEntity - class Group < Resource - attr_accessor :members - -- def initialize(group_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(group_cib_element, crm_dom, parent) -+ def initialize( -+ group_cib_element=nil, rsc_status=nil, parent=nil, operations=nil -+ ) -+ super(group_cib_element, nil, parent) - @class_type = 'group' - @members = [] - if group_cib_element and group_cib_element.name == 'group' - @status = ClusterEntity::ResourceStatus.new(:running) - group_cib_element.elements.each('primitive') { |e| -- p = Primitive.new(e, crm_dom, self, cib_dom) -+ p = Primitive.new(e, rsc_status, self, operations) - members << p -- @status = p.status if @status < p.status - } -+ update_status - end - end - - def update_status - @status = ClusterEntity::ResourceStatus.new(:running) -+ first = true - @members.each { |p| - p.update_status -- @status = p.status if @status < p.status -+ if first -+ first = false -+ next -+ end -+ if ( -+ p.status == ClusterEntity::ResourceStatus.new(:disabled) or -+ p.status == ClusterEntity::ResourceStatus.new(:blocked) or -+ p.status == ClusterEntity::ResourceStatus.new(:failed) -+ ) -+ @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ end - } -+ if (@members and @members.length > 0 and -+ (ClusterEntity::ResourceStatus.new(:running) != @members[0].status and -+ 
ClusterEntity::ResourceStatus.new(:unknown) != @members[0].status) -+ ) -+ @status = @members[0].status -+ end -+ if disabled? -+ @status = ClusterEntity::ResourceStatus.new(:disabled) -+ end - end - - def to_status(version='1') -@@ -713,8 +756,9 @@ module ClusterEntity - class MultiInstance < Resource - attr_accessor :member, :unique, :managed, :failed, :failure_ignored - -- def initialize(resource_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(resource_cib_element, crm_dom, parent) -+ def initialize(resource_cib_element=nil, crm_dom=nil, rsc_status=nil, -+ parent=nil, operations=nil) -+ super(resource_cib_element, nil, parent) - @member = nil - @multi_state = false - @unique = false -@@ -730,15 +774,13 @@ module ClusterEntity - ) - member = resource_cib_element.elements['group | primitive'] - if member and member.name == 'group' -- @member = Group.new(member, crm_dom, self, cib_dom) -+ @member = Group.new(member, rsc_status, self, operations) - elsif member and member.name == 'primitive' -- @member = Primitive.new(member, crm_dom, self, cib_dom) -- end -- if @member -- @status = @member.status -+ @member = Primitive.new(member, rsc_status, self, operations) - end -+ update_status - if crm_dom -- status = crm_dom.elements["//clone[@id='#{@id}']"] -+ status = crm_dom.elements["/crm_mon/resources//clone[@id='#{@id}']"] - if status - @unique = status.attributes['unique'] == 'true' - @managed = status.attributes['managed'] == 'true' -@@ -754,6 +796,9 @@ module ClusterEntity - @member.update_status - @status = @member.status - end -+ if disabled? 
-+ @status = ClusterEntity::ResourceStatus.new(:disabled) -+ end - end - - def to_status(version='1') -@@ -776,8 +821,11 @@ module ClusterEntity - - class Clone < MultiInstance - -- def initialize(resource_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(resource_cib_element, crm_dom, parent, cib_dom) -+ def initialize( -+ resource_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, -+ operations=nil -+ ) -+ super(resource_cib_element, crm_dom, rsc_status, parent, operations) - @class_type = 'clone' - end - -@@ -808,11 +856,12 @@ module ClusterEntity - class MasterSlave < MultiInstance - attr_accessor :masters, :slaves - -- def initialize(master_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(master_cib_element, crm_dom, parent, cib_dom) -+ def initialize(master_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, operations=nil) -+ super(master_cib_element, crm_dom, rsc_status, parent, operations) - @class_type = 'master' - @masters = [] - @slaves = [] -+ update_status - if @member - if @member.instance_of?(Primitive) - primitive_list = [@member] -@@ -820,15 +869,15 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if @masters.empty? and !disabled? -- @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ if (@masters.empty? and -+ @status != ClusterEntity::ResourceStatus.new(:disabled) -+ ) - @warning_list << { - :message => 'Resource is master/slave but has not been promoted '\ - + 'to master on any node.', - :type => 'no_master' - } - end -- @status = @member.status if @status < @member.status - end - end - -@@ -857,16 +906,21 @@ module ClusterEntity - def update_status - if @member - @member.update_status -+ @status = @member.status - if @member.instance_of?(Primitive) - primitive_list = [@member] - else - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if @masters.empty? 
and !disabled? -+ if (@masters.empty? and -+ @member.status != ClusterEntity::ResourceStatus.new(:disabled) -+ ) - @status = ClusterEntity::ResourceStatus.new(:partially_running) - end -- @status = @member.status if @status < @member.status -+ end -+ if disabled? -+ @status = ClusterEntity::ResourceStatus.new(:disabled) - end - end - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 87404ac..9a0d145 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -15,14 +15,14 @@ require 'resource.rb' - require 'cluster_entity.rb' - require 'auth.rb' - --def getAllSettings(session) -- stdout, stderr, retval = run_cmd(session, PCS, "property") -- stdout.map(&:chomp!) -- stdout.map(&:strip!) -+def getAllSettings(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ end - stdout2, stderr2, retval2 = run_cmd(session, PENGINE, "metadata") - metadata = stdout2.join - ret = {} -- if retval == 0 and retval2 == 0 -+ if cib_dom and retval2 == 0 - doc = REXML::Document.new(metadata) - - default = "" -@@ -37,8 +37,9 @@ def getAllSettings(session) - ret[name] = {"value" => default, "type" => el_type} - } - -- stdout.each {|line| -- key,val = line.split(': ', 2) -+ cib_dom.elements.each('/cib/configuration/crm_config//nvpair') { |e| -+ key = e.attributes['name'] -+ val = e.attributes['value'] - key.gsub!(/-/,"_") - if ret.has_key?(key) - if ret[key]["type"] == "boolean" -@@ -723,106 +724,92 @@ def get_cluster_name() - end - end - --def get_node_attributes(session) -- stdout, stderr, retval = run_cmd(session, PCS, "property", "list") -- if retval != 0 -- return {} -- end -- -- attrs = {} -- found = false -- stdout.each { |line| -- if not found -- if line.strip.start_with?("Node Attributes:") -- found = true -- end -- next -- end -- if not line.start_with?(" ") -- break -- end -- sline = line.split(":", 2) -- nodename = sline[0].strip -- attrs[nodename] = [] -- sline[1].strip.split(" ").each { |attr| -- key, val = attr.split("=", 2) -- attrs[nodename] << {:key => key, :value 
=> val} -+def get_node_attributes(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ return {} unless cib_dom -+ end -+ node_attrs = {} -+ cib_dom.elements.each( -+ '/cib/configuration/nodes/node/instance_attributes/nvpair' -+ ) { |e| -+ node = e.parent.parent.attributes['uname'] -+ node_attrs[node] ||= [] -+ node_attrs[node] << { -+ :id => e.attributes['id'], -+ :key => e.attributes['name'], -+ :value => e.attributes['value'] - } - } -- return attrs -+ node_attrs.each { |_, val| val.sort_by! { |obj| obj[:key] }} -+ return node_attrs - end - --def get_fence_levels(session) -- stdout, stderr, retval = run_cmd(session, PCS, "stonith", "level") -- if retval != 0 or stdout == "" -- return {} -+def get_fence_levels(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ return {} unless cib_dom - end - - fence_levels = {} -- node = "" -- stdout.each {|line| -- if line.start_with?(" Node: ") -- node = line.split(":",2)[1].strip -- next -- end -- fence_levels[node] ||= [] -- md = / Level (\S+) - (.*)$/.match(line) -- fence_levels[node] << {"level" => md[1], "devices" => md[2]} -+ cib_dom.elements.each( -+ '/cib/configuration/fencing-topology/fencing-level' -+ ) { |e| -+ target = e.attributes['target'] -+ fence_levels[target] ||= [] -+ fence_levels[target] << { -+ 'level' => e.attributes['index'], -+ 'devices' => e.attributes['devices'] -+ } - } -+ fence_levels.each { |_, val| val.sort_by! 
{ |obj| obj['level'].to_i }} - return fence_levels - end - --def get_acls(session) -- stdout, stderr, retval = run_cmd(session, PCS, "acl", "show") -- if retval != 0 or stdout == "" -- return {} -+def get_acls(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ return {} unless cib_dom - end - -- ret_val = {} -- state = nil -- user = "" -- role = "" -- -- stdout.each do |line| -- if m = /^User: (.*)$/.match(line) -- user = m[1] -- state = "user" -- ret_val[state] ||= {} -- ret_val[state][user] ||= [] -- next -- elsif m = /^Group: (.*)$/.match(line) -- user = m[1] -- state = "group" -- ret_val[state] ||= {} -- ret_val[state][user] ||= [] -- next -- elsif m = /^Role: (.*)$/.match(line) -- role = m[1] -- state = "role" -- ret_val[state] ||= {} -- ret_val[state][role] ||= {} -- next -- end -+ acls = { -+ 'role' => {}, -+ 'group' => {}, -+ 'user' => {}, -+ 'target' => {} -+ } - -- case state -- when "user", "group" -- m = /^ Roles: (.*)$/.match(line) -- ret_val[state][user] ||= [] -- m[1].scan(/\S+/).each {|urole| -- ret_val[state][user] << urole -+ cib_dom.elements.each('/cib/configuration/acls/*') { |e| -+ type = e.name[4..-1] -+ if e.name == 'acl_role' -+ role_id = e.attributes['id'] -+ desc = e.attributes['description'] -+ acls[type][role_id] = {} -+ acls[type][role_id]['description'] = desc ? 
desc : '' -+ acls[type][role_id]['permissions'] = [] -+ e.elements.each('acl_permission') { |p| -+ p_id = p.attributes['id'] -+ p_kind = p.attributes['kind'] -+ val = '' -+ if p.attributes['xpath'] -+ val = "xpath #{p.attributes['xpath']}" -+ elsif p.attributes['reference'] -+ val = "id #{p.attributes['reference']}" -+ else -+ next -+ end -+ acls[type][role_id]['permissions'] << "#{p_kind} #{val} (#{p_id})" -+ } -+ elsif ['acl_target', 'acl_group'].include?(e.name) -+ id = e.attributes['id'] -+ acls[type][id] = [] -+ e.elements.each('role') { |r| -+ acls[type][id] << r.attributes['id'] - } -- when "role" -- ret_val[state][role] ||= {} -- ret_val[state][role]["permissions"] ||= [] -- ret_val[state][role]["description"] ||= "" -- if m = /^ Description: (.*)$/.match(line) -- ret_val[state][role]["description"] = m[1] -- elsif m = /^ Permission: (.*)$/.match(line) -- ret_val[state][role]["permissions"] << m[1] -- end - end -- end -- return ret_val -+ } -+ acls['user'] = acls['target'] -+ return acls - end - - def enable_cluster(session) -@@ -1438,7 +1425,7 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - {:version=>'2', :operations=>'1'}, - true, - nil, -- 6 -+ 15 - ) - node_map[node] = {} - node_map[node].update(overview) -@@ -1601,10 +1588,10 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - } - if status[:status] != 'error' - status[:resource_list].each { |resource| -- if resource[:status] == 'failed' -+ if ['failed', 'blocked'].include?(resource[:status]) - status[:status] = 'error' - break -- elsif ['blocked', 'partially running'].include?(resource[:status]) -+ elsif ['partially running'].include?(resource[:status]) - status[:status] = 'warning' - end - } -@@ -1634,10 +1621,11 @@ def get_node_status(session, cib_dom) - :cluster_settings => {}, - :need_ring1_address => need_ring1_address?, - :is_cman_with_udpu_transport => is_cman_with_udpu_transport?, -- :acls => get_acls(session), -+ :acls => get_acls(session, 
cib_dom), - :username => session[:username], -- :fence_levels => get_fence_levels(session), -- :node_attr => node_attrs_to_v2(get_node_attributes(session)) -+ :fence_levels => get_fence_levels(session, cib_dom), -+ :node_attr => node_attrs_to_v2(get_node_attributes(session, cib_dom)), -+ :known_nodes => [] - } - - nodes = get_nodes_status() -@@ -1654,10 +1642,10 @@ def get_node_status(session, cib_dom) - - if cib_dom - node_status[:groups] = get_resource_groups(cib_dom) -- node_status[:constraints] = getAllConstraints(cib_dom.elements['//constraints']) -+ node_status[:constraints] = getAllConstraints(cib_dom.elements['/cib/configuration/constraints']) - end - -- cluster_settings = getAllSettings(session) -+ cluster_settings = getAllSettings(session, cib_dom) - if not cluster_settings.has_key?('error') - node_status[:cluster_settings] = cluster_settings - end -@@ -1670,7 +1658,7 @@ def get_resource_groups(cib_dom) - return [] - end - group_list = [] -- cib_dom.elements.each('cib/configuration/resources//group') do |e| -+ cib_dom.elements.each('/cib/configuration/resources//group') do |e| - group_list << e.attributes['id'] - end - return group_list -@@ -1682,49 +1670,54 @@ def get_resources(cib_dom, crm_dom=nil, get_operations=false) - end - - resource_list = [] -- cib = (get_operations) ? cib_dom : nil -+ operations = (get_operations) ? 
ClusterEntity::get_resources_operations(cib_dom) : nil -+ rsc_status = ClusterEntity::get_rsc_status(crm_dom) - -- cib_dom.elements.each('cib/configuration/resources/primitive') do |e| -- resource_list << ClusterEntity::Primitive.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/primitive') do |e| -+ resource_list << ClusterEntity::Primitive.new(e, rsc_status, nil, operations) - end -- cib_dom.elements.each('cib/configuration/resources/group') do |e| -- resource_list << ClusterEntity::Group.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/group') do |e| -+ resource_list << ClusterEntity::Group.new(e, rsc_status, nil, operations) - end -- cib_dom.elements.each('cib/configuration/resources/clone') do |e| -- resource_list << ClusterEntity::Clone.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/clone') do |e| -+ resource_list << ClusterEntity::Clone.new( -+ e, crm_dom, rsc_status, nil, operations -+ ) - end -- cib_dom.elements.each('cib/configuration/resources/master') do |e| -- resource_list << ClusterEntity::MasterSlave.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/master') do |e| -+ resource_list << ClusterEntity::MasterSlave.new( -+ e, crm_dom, rsc_status, nil, operations -+ ) - end - return resource_list - end - --def get_resource_by_id(id, cib_dom, crm_dom=nil, get_operations=false) -+def get_resource_by_id(id, cib_dom, crm_dom=nil, rsc_status=nil, operations=false) - unless cib_dom - return nil - end - -- e = cib_dom.elements["cib/configuration/resources//*[@id='#{id}']"] -+ e = cib_dom.elements["/cib/configuration/resources//*[@id='#{id}']"] - unless e - return nil - end - - if e.parent.name != 'resources' # if resource is in group, clone or master/slave -- p = get_resource_by_id(e.parent.attributes['id'], cib_dom, crm_dom, get_operations) -+ p = get_resource_by_id( -+ e.parent.attributes['id'], cib_dom, crm_dom, rsc_status, 
operations -+ ) - return p.get_map[id.to_sym] - end - -- cib = (get_operations) ? cib_dom : nil -- - case e.name - when 'primitive' -- return ClusterEntity::Primitive.new(e, crm_dom, nil, cib) -+ return ClusterEntity::Primitive.new(e, rsc_status, nil, operations) - when 'group' -- return ClusterEntity::Group.new(e, crm_dom, nil, cib) -+ return ClusterEntity::Group.new(e, rsc_status, nil, operations) - when 'clone' -- return ClusterEntity::Clone.new(e, crm_dom, nil, cib) -+ return ClusterEntity::Clone.new(e, crm_dom, rsc_status, nil, operations) - when 'master' -- return ClusterEntity::MasterSlave.new(e, crm_dom, nil, cib) -+ return ClusterEntity::MasterSlave.new(e, crm_dom, rsc_status, nil, operations) - else - return nil - end -@@ -1762,7 +1755,7 @@ def node_attrs_to_v2(node_attrs) - all_nodes_attr[node] = [] - attrs.each { |attr| - all_nodes_attr[node] << { -- :id => nil, -+ :id => attr[:id], - :name => attr[:key], - :value => attr[:value] - } -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 5fec386..bbeed55 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -75,9 +75,9 @@ Pcs = Ember.Application.createWithMixins({ - timeout: 20000, - success: function(data) { - Pcs.clusterController.update(data); -- Ember.run.next(function() { -- correct_visibility_dashboard(Pcs.clusterController.cur_cluster); -- }); -+ if (Pcs.clusterController.get('cur_cluster')) { -+ Pcs.clusterController.update_cur_cluster(Pcs.clusterController.get('cur_cluster').get('name')); -+ } - if (data["not_current_data"]) { - self.update(); - } -@@ -595,30 +595,20 @@ Pcs.ResourceObj = Ember.Object.extend({ - }.property("class_type"), - res_type: Ember.computed.alias('resource_type'), - status_icon: function() { -- var icon_class; -- switch (this.get('status')) { -- case "running": -- icon_class = "check"; -- break; -- case "disabled": -- case "partially running": -- icon_class = "warning"; -- break; -- case "failed": -- case 
"blocked": -- icon_class = "error"; -- break; -- default: -- icon_class = "x"; -- } -+ var icon_class = get_status_icon_class(this.get("status_val")); - return "<div style=\"float:left;margin-right:6px;height:16px;\" class=\"" + icon_class + " sprites\"></div>"; - }.property("status_val"), - status_val: function() { -- if (this.get('warning_list').length) -- return get_status_value("warning"); -+ var status_val = get_status_value(this.get('status')); -+ if (this.get('warning_list').length && status_val != get_status_value('disabled')) -+ status_val = get_status_value("warning"); - if (this.get('error_list').length) -- return get_status_value("error"); -- return get_status_value(this.status); -+ status_val = get_status_value("error"); -+ if ((get_status_value(this.get('status')) - status_val) < 0) { -+ return get_status_value(this.get('status')); -+ } else { -+ return status_val; -+ } - }.property('status', 'error_list.@each.message', 'warning_list.@each.message'), - status_color: function() { - return get_status_color(this.get("status_val")); -@@ -996,12 +986,17 @@ Pcs.Clusternode = Ember.Object.extend({ - return this.get('status') == "unknown"; - }.property("status"), - status_val: function() { -- if (this.warnings && this.warnings.length) -- return get_status_value("warning"); -- if (this.errors && this.errors.length) -- return get_status_value("error"); -- return get_status_value(this.status); -- }.property("status"), -+ var status_val = get_status_value(this.get('status')); -+ if (this.get('warning_list').length) -+ status_val = get_status_value("warning"); -+ if (this.get('error_list').length) -+ status_val = get_status_value("error"); -+ if ((get_status_value(this.get('status')) - status_val) < 0) { -+ return get_status_value(this.get('status')); -+ } else { -+ return status_val; -+ } -+ }.property('status', 'error_list.@each.message', 'warning_list.@each.message'), - status_style: function() { - var color = get_status_color(this.get("status_val")); - return 
"color: " + color + ((color != "green")? "; font-weight: bold;" : ""); -@@ -1011,8 +1006,8 @@ Pcs.Clusternode = Ember.Object.extend({ - return ((this.get("status_val") == get_status_value("ok") || this.status == "standby") ? show + "default-hidden" : ""); - }.property("status_val"), - status_icon: function() { -- var icon_class = {"-1": "x", 1: "error", 2: "warning", 3: "x", 4: "check"}; -- return "<div style=\"float:left;margin-right:6px;\" class=\"" + icon_class[this.get("status_val")] + " sprites\"></div>"; -+ var icon_class = get_status_icon_class(this.get("status_val")); -+ return "<div style=\"float:left;margin-right:6px;\" class=\"" + icon_class + " sprites\"></div>"; - }.property("status_val"), - error_list: [], - warning_list: [], -@@ -1158,18 +1153,18 @@ Pcs.Cluster = Ember.Object.extend({ - return out; - }.property("error_list"), - status_icon: function() { -- var icon_class = {"-1": "x", 1: "error", 2: "warning", 3: "x", 4: "check"}; -- return "<div style=\"float:left;margin-right:6px;\" class=\"" + icon_class[get_status_value(this.status)] + " sprites\"></div>"; -+ var icon_class = get_status_icon_class(get_status_value(this.get('status'))); -+ return "<div style=\"float:left;margin-right:6px;\" class=\"" + icon_class + " sprites\"></div>"; - }.property("status"), - quorum_show: function() { -- if (this.status == "unknown") { -+ if (this.get('status') == "unknown") { - return "<span style='color:orange'>(quorate unknown)</span>" -- } else if (!this.quorate) { -+ } else if (!this.get('quorate')) { - return "<span style='color: red'>(doesn't have quorum)</span>" - } else { - return "" - } -- }.property("status", "quorum"), -+ }.property("status", "quorate"), - nodes: [], - nodes_failed: 0, - resource_list: [], -@@ -1270,7 +1265,7 @@ Pcs.Cluster = Ember.Object.extend({ - - Pcs.clusterController = Ember.Object.create({ - cluster_list: Ember.ArrayController.create({ -- content: Ember.A(), sortProperties: ['status'], -+ content: Ember.A(), sortProperties: 
['status', 'name'], - sortAscending: true, - sortFunction: function(a,b){return status_comparator(a,b);} - }), -@@ -1283,26 +1278,25 @@ Pcs.clusterController = Ember.Object.create({ - num_warning: 0, - num_unknown: 0, - -- update_cur_cluster: function(row) { -+ update_cur_cluster: function(cluster_name) { - var self = this; -- var cluster_name = $(row).attr("nodeID"); -- $("#clusters_list").find("div.arrow").hide(); -- $(row).find("div.arrow").show(); -+ $("#clusters_list div.arrow").hide(); -+ var selected_cluster = null; - - $.each(self.get('cluster_list').get('content'), function(key, cluster) { - if (cluster.get("name") == cluster_name) { -- self.set('cur_cluster', cluster); -+ selected_cluster = cluster; - return false; - } - }); -- correct_visibility_dashboard(self.get('cur_cluster')); - -- $("#node_sub_info").children().each(function (i, val) { -- if ($(val).attr("id") == ("cluster_info_" + cluster_name)) -- $(val).show(); -- else -- $(val).hide(); -- }); -+ self.set('cur_cluster', selected_cluster); -+ if (selected_cluster) { -+ Ember.run.next(function() { -+ $("#clusters_list tr[nodeID=" + cluster_name + "] div.arrow").show(); -+ correct_visibility_dashboard(self.get('cur_cluster')); -+ }); -+ } - }, - - update: function(data) { -@@ -1355,21 +1349,6 @@ Pcs.clusterController = Ember.Object.create({ - }); - } - -- switch (cluster.get('status')) { -- case "ok": -- self.incrementProperty('num_ok'); -- break; -- case "error": -- self.incrementProperty('num_error'); -- break; -- case "warning": -- self.incrementProperty('num_warning'); -- break; -- default: -- self.incrementProperty('num_unknown'); -- break; -- } -- - var nodes_to_auth = []; - $.each(cluster.get('warning_list'), function(key, val){ - if (val.hasOwnProperty("type") && val.type == "nodes_not_authorized"){ -@@ -1398,6 +1377,21 @@ Pcs.clusterController = Ember.Object.create({ - - cluster.set("status", "unknown"); - } -+ -+ switch (get_status_value(cluster.get('status'))) { -+ case 
get_status_value("ok"): -+ self.incrementProperty('num_ok'); -+ break; -+ case get_status_value("error"): -+ self.incrementProperty('num_error'); -+ break; -+ case get_status_value("warning"): -+ self.incrementProperty('num_warning'); -+ break; -+ default: -+ self.incrementProperty('num_unknown'); -+ break; -+ } - }); - - var to_remove = []; -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index e4830a9..cddf14e 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -1850,10 +1850,10 @@ function get_status_value(status) { - standby: 2, - "partially running": 2, - disabled: 3, -- unknown: 3, -- ok: 4, -- running: 4, -- online: 4 -+ unknown: 4, -+ ok: 5, -+ running: 5, -+ online: 5 - }; - return ((values.hasOwnProperty(status)) ? values[status] : -1); - } -@@ -1866,11 +1866,25 @@ function status_comparator(a,b) { - return valA - valB; - } - -+function get_status_icon_class(status_val) { -+ switch (status_val) { -+ case get_status_value("error"): -+ return "error"; -+ case get_status_value("disabled"): -+ case get_status_value("warning"): -+ return "warning"; -+ case get_status_value("ok"): -+ return "check"; -+ default: -+ return "x"; -+ } -+} -+ - function get_status_color(status_val) { - if (status_val == get_status_value("ok")) { - return "green"; - } -- else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown")) { -+ else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown") || status_val == get_status_value('disabled')) { - return "orange"; - } - return "red"; -diff --git a/pcsd/views/_cluster_list.erb b/pcsd/views/_cluster_list.erb -index 9d719e0..90f084e 100644 ---- a/pcsd/views/_cluster_list.erb -+++ b/pcsd/views/_cluster_list.erb -@@ -22,7 +22,7 @@ - {{/if}} - </tr> - {{#each Pcs.clusterController.cluster_list }} -- <tr onmouseover="hover_over(this);" onmouseout="hover_out(this);" onclick="Pcs.clusterController.update_cur_cluster(this);" {{bind-attr 
nodeID="this.name"}}> -+ <tr onmouseover="hover_over(this);" onmouseout="hover_out(this);" onclick="Pcs.clusterController.update_cur_cluster($(this).attr('nodeID'));" {{bind-attr nodeID="this.name"}}> - <td class="node_list_check"> - <input class="node_list_check" type="checkbox" {{bind-attr name="input_name"}} {{bind-attr res_id="name"}}> - </td> -@@ -42,7 +42,7 @@ - {{else}} - {{nodes.length}} - {{#if nodes_failed}} -- | <div style="display: inline-block;" title="Issue(s) found"><div class="warning sprites"></div> <span style="font-weight: bold; color: red">{{nodes_failed}}</span></div> -+ | <div style="display: inline-block;" title="Issue(s) found"><span style="font-weight: bold; color: red">{{nodes_failed}}</span></div> - {{/if}} - {{/if}} - </td> -@@ -52,7 +52,7 @@ - {{else}} - {{resource_list.length}} - {{#if resources_failed}} -- | <div style="display: inline-block;" title="Issue(s) found"><div class="warning sprites"></div> <span style="font-weight: bold; color: red">{{resources_failed}}</span></div> -+ | <div style="display: inline-block;" title="Issue(s) found"><span style="font-weight: bold; color: red">{{resources_failed}}</span></div> - {{/if}} - {{/if}} - </td> -diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb -index bb4e989..b24c74a 100644 ---- a/pcsd/views/main.erb -+++ b/pcsd/views/main.erb -@@ -151,7 +151,7 @@ - <input disabled style="margin-right: 50px;" type="text" {{bind-attr value=resource._id}} size="35" class="text_field"> - </td> - <td> -- <div style="margin-right: 8px;" class="check sprites"></div> -+ {{{resource.status_icon}}} - </td> - <td nowrap>{{{resource.show_status}}}</td> - </tr> -diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb -index 79a8637..3620779 100644 ---- a/pcsd/views/manage.erb -+++ b/pcsd/views/manage.erb -@@ -42,131 +42,132 @@ - <div id="node_info_header_title">INFORMATION ABOUT CLUSTERS</div> - </div> - <div id="node_sub_info"> -- <div id="no_cluster_selected">Select a cluster to view more detailed 
cluster information</div> -- {{#each Pcs.clusterController.cluster_list}} -- <div style="display:none;" {{bind-attr id=div_id}}> -- <table> -- <tr> -- <td style="text-align:right"> -- <b>Cluster:</b> -- </td> -- <td> -- {{#if forbidden}} -- {{name}} -- {{else}} -- <a {{bind-attr href=url_link}}>{{name}}</a> {{{quorum_show}}} -+ {{#if Pcs.clusterController.cur_cluster}} -+ <div {{bind-attr id=Pcs.clusterController.cur_cluster.div_id}}> -+ <table> -+ <tr> -+ <td style="text-align:right"> -+ <b>Cluster:</b> -+ </td> -+ <td> -+ {{#if Pcs.clusterController.cur_cluster.forbidden}} -+ {{Pcs.clusterController.cur_cluster.name}} -+ {{else}} -+ <a {{bind-attr href=Pcs.clusterController.cur_cluster.url_link}}>{{Pcs.clusterController.cur_cluster.name}}</a> {{{Pcs.clusterController.cur_cluster.quorum_show}}} -+ {{/if}} -+ </td> -+ </tr> -+ {{#if Pcs.clusterController.cur_cluster.error_list}} -+ <tr><td style="text-align:right"><b>Errors:</b> </td><td></td></tr> - {{/if}} -- </td> -- </tr> -- {{#if error_list}} -- <tr><td style="text-align:right"><b>Errors:</b> </td><td></td></tr> -- {{/if}} -- {{#each error_list}} -- <tr><td></td><td style="color: red;">{{{message}}}</td></tr> -- {{/each}} -- {{#if warning_list}} -- <tr><td style="text-align:right"><b>Warnings:</b> </td><td></td></tr> -- {{/if}} -- {{#each warning_list}} -- <tr><td></td><td style="color: orange;">{{{message}}}</td></tr> -- {{/each}} -- </table><br> -- {{#unless forbidden}} -- <table style="clear:left;float:left" class="nodes_list"> -- <tr> -- <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'nodes');"> -- <span style="display: none;" class="downarrow sprites"></span> -- <span style="" class="rightarrow sprites"></span> -- Nodes ({{nodes.length}} | {{#if nodes_failed}}<span style="color: red">issues: {{nodes_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>) -+ {{#each Pcs.clusterController.cur_cluster.error_list}} -+ <tr><td></td><td style="color: 
red;">{{{message}}}</td></tr> -+ {{/each}} -+ {{#if Pcs.clusterController.cur_cluster.warning_list}} -+ <tr><td style="text-align:right"><b>Warnings:</b> </td><td></td></tr> -+ {{/if}} -+ {{#each Pcs.clusterController.cur_cluster.warning_list}} -+ <tr><td></td><td style="color: orange;">{{{message}}}</td></tr> -+ {{/each}} -+ </table><br> -+ {{#unless Pcs.clusterController.cur_cluster.forbidden}} -+ <table style="clear:left;float:left" class="nodes_list"> -+ <tr> -+ <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'nodes');"> -+ <span style="display: none;" class="downarrow sprites"></span> -+ <span style="" class="rightarrow sprites"></span> -+ Nodes ({{Pcs.clusterController.cur_cluster.nodes.length}} | {{#if Pcs.clusterController.cur_cluster.nodes_failed}}<span style="color: red">issues: {{Pcs.clusterController.cur_cluster.nodes_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>) - <span style="font-size: 10px;">(displaying {{#if Pcs.clusterController.show_all_nodes}}all{{else}}only issues{{/if}})</span> -- </td> -- </tr> -- <tr> -- <td> -- <table class="datatable"> -- <tr> -- <th style="width: 150px;">NODE</th> -- <th style="width: 80px;">STATUS</th> -- <th style="width: 70px;">QUORUM</th> -- </tr> -- {{#each node in nodes}} -- <tr {{bind-attr title=node.tooltip}} {{bind-attr class=node.status_class}}> -- <td><a {{bind-attr href=node.url_link}}>{{node.name}}</a></td> -- <td {{bind-attr style=node.status_style}}>{{{node.status_icon}}}{{node.status}}</td> -- <td>{{{node.quorum_show}}}</td> -- </tr> -- {{/each}} -- </table> -- </td> -- </tr> -- </table> -- {{#unless status_unknown}} -- <table style="clear:left;float:left" class="resources_list"> -- <tr> -- <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'resources');"> -- <span style="display: none;" class="downarrow sprites"></span> -- <span style="" class="rightarrow sprites"></span> -- Resources ({{resource_list.length}} | {{#if 
resources_failed}}<span style="color: red">issues: {{resources_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>) -+ </td> -+ </tr> -+ <tr> -+ <td> -+ <table class="datatable"> -+ <tr> -+ <th style="width: 150px;">NODE</th> -+ <th style="width: 80px;">STATUS</th> -+ <th style="width: 70px;">QUORUM</th> -+ </tr> -+ {{#each node in Pcs.clusterController.cur_cluster.nodes}} -+ <tr {{bind-attr title=node.tooltip}} {{bind-attr class=node.status_class}}> -+ <td><a {{bind-attr href=node.url_link}}>{{node.name}}</a></td> -+ <td {{bind-attr style=node.status_style}}>{{{node.status_icon}}}{{node.status}}</td> -+ <td>{{{node.quorum_show}}}</td> -+ </tr> -+ {{/each}} -+ </table> -+ </td> -+ </tr> -+ </table> -+ {{#unless Pcs.clusterController.cur_cluster.status_unknown}} -+ <table style="clear:left;float:left" class="resources_list"> -+ <tr> -+ <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'resources');"> -+ <span style="display: none;" class="downarrow sprites"></span> -+ <span style="" class="rightarrow sprites"></span> -+ Resources ({{Pcs.clusterController.cur_cluster.resource_list.length}} | {{#if Pcs.clusterController.cur_cluster.resources_failed}}<span style="color: red">issues: {{Pcs.clusterController.cur_cluster.resources_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>) - <span style="font-size: 10px;">(displaying {{#if Pcs.clusterController.show_all_resources}}all{{else}}only issues{{/if}})</span> -- </td> -- </tr> -- <tr> -- <td> -- <table class="datatable"> -- <tr> -- <th style="width: 150px;">RESOURCE</th> -- <th style="width: 80px;">STATUS</th> -- </tr> -- {{#each r in resource_list}} -- <tr {{bind-attr title=r.tooltip}} {{bind-attr class=r.status_class}}> -- <td><a {{bind-attr href=r.url_link}}>{{r.id}}</a></td> -- <td {{bind-attr style=r.status_style}}>{{{r.status_icon}}}{{r.status}}</td> -- </tr> -- {{else}} -- <tr> -- <td>No resources</td> -- <td></td> -- </tr> -- {{/each}} -- </table> -- </td> -- </tr> 
-- </table> -- <table style="clear:left;float:left" class="fence_list"> -- <tr> -- <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'fence');"> -- <span style="display: none;" class="downarrow sprites"></span> -- <span style="" class="rightarrow sprites"></span> -- Fence-devices ({{fence_list.length}} | {{#if fence_failed}}<span style="color: red">issues: {{fence_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>) -+ </td> -+ </tr> -+ <tr> -+ <td> -+ <table class="datatable"> -+ <tr> -+ <th style="width: 150px;">RESOURCE</th> -+ <th style="width: 80px;">STATUS</th> -+ </tr> -+ {{#each r in Pcs.clusterController.cur_cluster.resource_list}} -+ <tr {{bind-attr title=r.tooltip}} {{bind-attr class=r.status_class}}> -+ <td><a {{bind-attr href=r.url_link}}>{{r.id}}</a></td> -+ <td {{bind-attr style=r.status_style}}>{{{r.status_icon}}}{{r.status}}</td> -+ </tr> -+ {{else}} -+ <tr> -+ <td>No resources</td> -+ <td></td> -+ </tr> -+ {{/each}} -+ </table> -+ </td> -+ </tr> -+ </table> -+ <table style="clear:left;float:left" class="fence_list"> -+ <tr> -+ <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'fence');"> -+ <span style="display: none;" class="downarrow sprites"></span> -+ <span style="" class="rightarrow sprites"></span> -+ Fence-devices ({{Pcs.clusterController.cur_cluster.fence_list.length}} | {{#if Pcs.clusterController.cur_cluster.fence_failed}}<span style="color: red">issues: {{Pcs.clusterController.cur_cluster.fence_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>) - <span style="font-size: 10px;">(displaying {{#if Pcs.clusterController.show_all_fence}}all{{else}}only issues{{/if}})</span> -- </td> -- </tr> -- <tr> -- <td> -- <table class="datatable"> -- <tr> -- <th style="width: 150px;">FENCE-DEVICE</th> -- <th style="width: 80px;">STATUS</th> -- </tr> -- {{#each f in fence_list}} -- <tr {{bind-attr title=f.tooltip}} {{bind-attr class=f.status_class_fence}}> -- <td><a {{bind-attr 
href=f.url_link}}>{{f.id}}</a></td> -- <td {{bind-attr style=f.status_style}}>{{{f.status_icon}}}{{f.status}}</td> -- </tr> -- {{else}} -- <tr> -- <td>No fence devices</td> -- <td></td> -- </tr> -- {{/each}} -- </table> -- </td> -- </tr> -- </table> -- {{/unless}} -- {{/unless}} -- </div> -- {{/each}} -+ </td> -+ </tr> -+ <tr> -+ <td> -+ <table class="datatable"> -+ <tr> -+ <th style="width: 150px;">FENCE-DEVICE</th> -+ <th style="width: 80px;">STATUS</th> -+ </tr> -+ {{#each f in Pcs.clusterController.cur_cluster.fence_list}} -+ <tr {{bind-attr title=f.tooltip}} {{bind-attr class=f.status_class_fence}}> -+ <td><a {{bind-attr href=f.url_link}}>{{f.id}}</a></td> -+ <td {{bind-attr style=f.status_style}}>{{{f.status_icon}}}{{f.status}}</td> -+ </tr> -+ {{else}} -+ <tr> -+ <td>No fence devices</td> -+ <td></td> -+ </tr> -+ {{/each}} -+ </table> -+ </td> -+ </tr> -+ </table> -+ {{/unless}} -+ {{/unless}} -+ </div> -+ {{else}} -+ <div id="no_cluster_selected">Select a cluster to view more detailed cluster information</div> -+ {{/if}} - </div> - </td> - </tr> --- -1.9.1 - diff --git a/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch b/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch deleted file mode 100644 index 3389bd3..0000000 --- a/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 1307ccbf977dd4ca797a82312631afae03530fbb Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Tue, 8 Sep 2015 09:19:10 +0200 -Subject: [PATCH] fixed a typo in an error message - ---- - pcsd/remote.rb | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 06947ec..8a71000 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -2127,7 +2127,7 @@ def fix_auth_of_cluster(params, request, session) - tokens_data, true - ) - if retval == 404 -- return [400, "Old version of PCS/PCSD is runnig on cluster nodes. Fixing authentication is not supported. 
Use 'pcs cluster auth' command to authenticate the nodes."] -+ return [400, "Old version of PCS/PCSD is running on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."] - elsif retval != 200 - return [400, "Authentication failed."] - end --- -1.9.1 - diff --git a/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch b/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch deleted file mode 100644 index 06f1040..0000000 --- a/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 0b12b5e6212b42a3128d30dbce9371ac361dd865 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Tue, 15 Sep 2015 16:30:23 +0200 -Subject: [PATCH] fix authentication in web UI - ---- - pcsd/public/js/pcsd.js | 10 ++++---- - pcsd/remote.rb | 62 +++++++++++++++++++++++++++++++------------------- - 2 files changed, 45 insertions(+), 27 deletions(-) - -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 197cdd1..e4830a9 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -719,7 +719,7 @@ function auth_nodes(dialog) { - $("#auth_failed_error_msg").hide(); - $.ajax({ - type: 'POST', -- url: '/remote/auth_nodes', -+ url: '/remote/auth_gui_against_nodes', - data: dialog.find("#auth_nodes_form").serialize(), - timeout: pcs_timeout, - success: function (data) { -@@ -735,9 +735,11 @@ function auth_nodes(dialog) { - function auth_nodes_dialog_update(dialog_obj, data) { - var unauth_nodes = []; - var node; -- for (node in data) { -- if (data[node] != 0) { -- unauth_nodes.push(node); -+ if (data['node_auth_error']) { -+ for (node in data['node_auth_error']) { -+ if (data['node_auth_error'][node] != 0) { -+ unauth_nodes.push(node); -+ } - } - } - -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 8a71000..e65c8ac 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -60,7 +60,7 @@ def remote(params, request, session) - :cluster_destroy => 
method(:cluster_destroy), - :get_wizard => method(:get_wizard), - :wizard_submit => method(:wizard_submit), -- :auth_nodes => method(:auth_nodes), -+ :auth_gui_against_nodes => method(:auth_gui_against_nodes), - :get_tokens => method(:get_tokens), - :get_cluster_tokens => method(:get_cluster_tokens), - :save_tokens => method(:save_tokens), -@@ -1994,32 +1994,48 @@ def wizard_submit(params, request, session) - - end - --def auth_nodes(params, request, session) -- retval = {} -- params.each{|node| -- if node[0].end_with?"-pass" and node[0].length > 5 -- nodename = node[0][0..-6] -- if params.has_key?("all") -- pass = params["pass-all"] -- else -- pass = node[1] -- end -- result, sync_successful, _, _ = pcs_auth( -- session, [nodename], SUPERUSER, pass, true, true -- ) -- if not sync_successful -- retval[nodename] = 1 -- else -- node_status = result[nodename]['status'] -- if 'ok' == node_status or 'already_authorized' == node_status -- retval[nodename] = 0 -+def auth_gui_against_nodes(params, request, session) -+ node_auth_error = {} -+ new_tokens = {} -+ threads = [] -+ params.each { |node| -+ threads << Thread.new { -+ if node[0].end_with?("-pass") and node[0].length > 5 -+ nodename = node[0][0..-6] -+ if params.has_key?("all") -+ pass = params["pass-all"] - else -- retval[nodename] = 1 -+ pass = node[1] -+ end -+ data = { -+ 'node-0' => nodename, -+ 'username' => SUPERUSER, -+ 'password' => pass, -+ 'force' => 1, -+ } -+ node_auth_error[nodename] = 1 -+ code, response = send_request(session, nodename, 'auth', true, data) -+ if 200 == code -+ token = response.strip -+ if not token.empty? -+ new_tokens[nodename] = token -+ node_auth_error[nodename] = 0 -+ end - end - end -- end -+ } - } -- return [200, JSON.generate(retval)] -+ threads.each { |t| t.join } -+ -+ if not new_tokens.empty? 
-+ cluster_nodes = get_corosync_nodes() -+ tokens_cfg = Cfgsync::PcsdTokens.from_file('') -+ sync_successful, sync_responses = Cfgsync::save_sync_new_tokens( -+ tokens_cfg, new_tokens, cluster_nodes, $cluster_name -+ ) -+ end -+ -+ return [200, JSON.generate({'node_auth_error' => node_auth_error})] - end - - # not used anymore, left here for backward compatability reasons --- -1.9.1 - diff --git a/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch b/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch deleted file mode 100644 index 0d9637f..0000000 --- a/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch +++ /dev/null @@ -1,130 +0,0 @@ -From 5c62afc314bfbff55e36c0f7f8e9aec0cc9246c4 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Wed, 2 Sep 2015 14:04:55 +0200 -Subject: [PATCH] web UI: mark unsaved permissions forms - ---- - pcsd/public/js/pcsd.js | 36 ++++++++++++++++++++++++++++++++++++ - pcsd/views/_permissions_cluster.erb | 5 ++++- - pcsd/views/permissions.erb | 8 +++++++- - 3 files changed, 47 insertions(+), 2 deletions(-) - -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 2c71e6b..879b533 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -2205,6 +2205,7 @@ function permissions_load_cluster(cluster_name, callback) { - $("#" + element_id + " :checkbox").each(function(key, checkbox) { - permissions_fix_dependent_checkboxes(checkbox); - }); -+ permissions_cluster_dirty_flag(cluster_name, false); - if (callback) { - callback(); - } -@@ -2259,8 +2260,27 @@ function permissions_save_cluster(form) { - }); - } - -+function permissions_cluster_dirty_flag(cluster_name, flag) { -+ var cluster_row = permissions_get_cluster_row(cluster_name); -+ if (cluster_row) { -+ var dirty_elem = cluster_row.find("span[class=unsaved_changes]"); -+ if (dirty_elem) { -+ if (flag) { -+ dirty_elem.show(); -+ } -+ else { -+ dirty_elem.hide(); -+ } -+ } -+ } -+} -+ - function 
permission_remove_row(button) { -+ var cluster_name = permissions_get_clustername( -+ $(button).parents("form").first() -+ ); - $(button).parent().parent().remove(); -+ permissions_cluster_dirty_flag(cluster_name, true); - } - - function permissions_add_row(template_row) { -@@ -2268,6 +2288,9 @@ function permissions_add_row(template_row) { - var user_type = permissions_get_row_type(template_row); - var max_key = -1; - var exists = false; -+ var cluster_name = permissions_get_clustername( -+ $(template_row).parents("form").first() -+ ); - - if("" == user_name) { - alert("Please enter the name"); -@@ -2326,6 +2349,8 @@ function permissions_add_row(template_row) { - template_inputs.removeAttr("checked").removeAttr("selected"); - template_inputs.removeAttr("disabled").removeAttr("readonly"); - $(template_row).find(":input[type=text]").val(""); -+ -+ permissions_cluster_dirty_flag(cluster_name, true); - } - - function permissions_get_dependent_checkboxes(checkbox) { -@@ -2400,3 +2425,14 @@ function permissions_get_checkbox_permission(checkbox) { - return ""; - } - -+function permissions_get_cluster_row(cluster_name) { -+ var cluster_row = null; -+ $('#cluster_list td[class=node_name]').each(function(index, elem) { -+ var jq_elem = $(elem); -+ if (jq_elem.text().trim() == cluster_name.trim()) { -+ cluster_row = jq_elem.parents("tr").first(); -+ } -+ }); -+ return cluster_row; -+} -+ -diff --git a/pcsd/views/_permissions_cluster.erb b/pcsd/views/_permissions_cluster.erb -index 232a5de..4048366 100644 ---- a/pcsd/views/_permissions_cluster.erb -+++ b/pcsd/views/_permissions_cluster.erb -@@ -58,7 +58,10 @@ - <% if user['allow'].include?(perm['code']) %> - checked="checked" - <% end %> -- onchange="permissions_fix_dependent_checkboxes(this);" -+ onchange=" -+ permissions_fix_dependent_checkboxes(this); -+ permissions_cluster_dirty_flag('<%= h(@cluster_name) %>', true); -+ " - > - </td> - <% } %> -diff --git a/pcsd/views/permissions.erb b/pcsd/views/permissions.erb -index 
b02d9d3..1e38d7e 100644 ---- a/pcsd/views/permissions.erb -+++ b/pcsd/views/permissions.erb -@@ -16,7 +16,8 @@ - <table cellpadding="0" cellspacing="0" style="width:100%;"> - <tr> - <th> </th> -- <th>CLUSTER NAME</th> -+ <th> </th> -+ <th>CLUSTER NAME</th> - <th style="padding-right: 16px;"> </th> - </tr> - <% @clusters.each do |c| %> -@@ -28,6 +29,11 @@ - <td class="node_list_sprite"> - <div class="check sprites"></div> - </td> -+ <td style="min-width:1em; padding-right:0.5em;"> -+ <span class="unsaved_changes" style="display:none;" -+ title="There are unsaved changes in the form" -+ >(*)</span> -+ </td> - <td nowrap class="node_name"> - <%= h(c.name) %> - </td> --- -1.9.1 - diff --git a/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch b/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch deleted file mode 100644 index 569ebd8..0000000 --- a/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch +++ /dev/null @@ -1,89 +0,0 @@ -From 00ef3951514889791a11318124c271309d8b4958 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Fri, 4 Sep 2015 16:01:00 +0200 -Subject: [PATCH] check and refresh user auth info upon each request - ---- - pcs/cluster.py | 2 ++ - pcs/utils.py | 2 ++ - pcsd/auth.rb | 16 ++++++++++++---- - pcsd/test/test_auth.rb | 1 + - 4 files changed, 17 insertions(+), 4 deletions(-) - -diff --git a/pcs/cluster.py b/pcs/cluster.py -index d2a80a8..5a2128a 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -235,6 +235,8 @@ def auth_nodes_do(nodes, username, password, force, local): - 'local': local, - } - output, retval = utils.run_pcsdcli('auth', pcsd_data) -+ if retval == 0 and output['status'] == 'access_denied': -+ utils.err('Access denied') - if retval == 0 and output['status'] == 'ok' and output['data']: - failed = False - try: -diff --git a/pcs/utils.py b/pcs/utils.py -index c91b50e..757c159 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -803,6 
+803,8 @@ def call_local_pcsd(argv, interactive_auth=False, std_in=None): - return [['Unable to communicate with pcsd'], 1, '', ''] - if output_json['status'] == 'bad_command': - return [['Command not allowed'], 1, '', ''] -+ if output_json['status'] == 'access_denied': -+ return [['Access denied'], 1, '', ''] - if output_json['status'] != "ok" or not output_json["data"]: - return [['Unable to communicate with pcsd'], 1, '', ''] - try: -diff --git a/pcsd/auth.rb b/pcsd/auth.rb -index 22d7868..53712ed 100644 ---- a/pcsd/auth.rb -+++ b/pcsd/auth.rb -@@ -19,7 +19,7 @@ class PCSAuth - - def self.validUser(username, password, generate_token = false) - $logger.info("Attempting login by '#{username}'") -- if not Rpam.auth(username,password, :service => "pcsd") -+ if not Rpam.auth(username, password, :service => "pcsd") - $logger.info("Failed login by '#{username}' (bad username or password)") - return nil - end -@@ -59,7 +59,7 @@ class PCSAuth - return [true, stdout.join(' ').split(nil)] - end - -- def self.isUserAllowedToLogin(username) -+ def self.isUserAllowedToLogin(username, log_success=true) - success, groups = getUsersGroups(username) - if not success - $logger.info( -@@ -73,7 +73,9 @@ class PCSAuth - ) - return false - end -- $logger.info("Successful login by '#{username}'") -+ if log_success -+ $logger.info("Successful login by '#{username}'") -+ end - return true - end - -@@ -131,7 +133,13 @@ class PCSAuth - end - - def self.isLoggedIn(session) -- return session[:username] != nil -+ username = session[:username] -+ if (username != nil) and isUserAllowedToLogin(username, false) -+ success, groups = getUsersGroups(username) -+ session[:usergroups] = success ? 
groups : [] -+ return true -+ end -+ return false - end - - def self.getSuperuserSession() --- -1.9.1 - diff --git a/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch b/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch deleted file mode 100644 index 8815b8f..0000000 --- a/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 25a4636078b869779cc6adfac3368a9fc382496d Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Mon, 7 Sep 2015 16:42:02 +0200 -Subject: [PATCH] fix checking user's group membership - ---- - pcsd/pcsd.rb | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index da47fb2..9a07ee8 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -120,8 +120,7 @@ $thread_cfgsync = Thread.new { - - helpers do - def protected! -- PCSAuth.loginByToken(session, cookies) if not PCSAuth.isLoggedIn(session) -- if not PCSAuth.isLoggedIn(session) -+ if not PCSAuth.loginByToken(session, cookies) and not PCSAuth.isLoggedIn(session) - # If we're on /managec/<cluster_name>/main we redirect - match_expr = "/managec/(.*)/(.*)" - mymatch = request.path.match(match_expr) --- -1.9.1 - diff --git a/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch b/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch deleted file mode 100644 index 0ef49d8..0000000 --- a/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch +++ /dev/null @@ -1,24 +0,0 @@ -From df10fbfd2673523f4cadac4be64cdf97ec9aba6c Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Wed, 12 Aug 2015 15:47:09 +0200 -Subject: [PATCH] improve logging in pcsd - ---- - pcsd/pcs.rb | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 6c7661a..1cddca8 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -359,6 +359,7 @@ def send_request_with_token(session, node, request, post=false, data={}, remote= - token = additional_tokens[node] || 
get_node_token(node) - $logger.info "SRWT Node: #{node} Request: #{request}" - if not token -+ $logger.error "Unable to connect to node #{node}, no token available" - return 400,'{"notoken":true}' - end - cookies_data = { --- -1.9.1 - diff --git a/SOURCES/bz1158577-02-fix-certificates-syncing.patch b/SOURCES/bz1158577-02-fix-certificates-syncing.patch deleted file mode 100644 index 21faec1..0000000 --- a/SOURCES/bz1158577-02-fix-certificates-syncing.patch +++ /dev/null @@ -1,554 +0,0 @@ -From 8363f06e73bba0a1d3f7d18cf5b1cde5b5080141 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Thu, 27 Aug 2015 14:29:21 +0200 -Subject: [PATCH] fix certificates syncing - ---- - pcs/cluster.py | 16 +++--- - pcs/pcsd.py | 107 ++++++++++++++++++++++++++-------------- - pcs/utils.py | 29 +++++++++++ - pcsd/pcs.rb | 153 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- - pcsd/pcsd.rb | 12 ++++- - pcsd/remote.rb | 12 +++-- - pcsd/ssl.rb | 26 ++++++++-- - 7 files changed, 292 insertions(+), 63 deletions(-) - -diff --git a/pcs/cluster.py b/pcs/cluster.py -index c982ffe..d2a80a8 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -345,13 +345,13 @@ def corosync_setup(argv,returnConfig=False): - sync_start(argv, primary_nodes) - if "--enable" in utils.pcs_options: - enable_cluster(primary_nodes) -- pcsd.pcsd_sync_certs([]) -+ pcsd.pcsd_sync_certs([], exit_after_error=False) - return - elif not returnConfig and not "--local" in utils.pcs_options:# and fedora_config: - sync(argv, primary_nodes) - if "--enable" in utils.pcs_options: - enable_cluster(primary_nodes) -- pcsd.pcsd_sync_certs([]) -+ pcsd.pcsd_sync_certs([], exit_after_error=False) - return - else: - nodes = argv[1:] -@@ -1190,15 +1190,17 @@ def cluster_node(argv): - - utils.setCorosyncConfig(node0, corosync_conf) - if "--enable" in utils.pcs_options: -- utils.enableCluster(node0) -+ retval, err = utils.enableCluster(node0) -+ if retval != 0: -+ print("Warning: enable cluster - {0}".format(err)) 
- if "--start" in utils.pcs_options or utils.is_rhel6(): - # always start new node on cman cluster - # otherwise it will get fenced -- utils.startCluster(node0) -+ retval, err = utils.startCluster(node0) -+ if retval != 0: -+ print("Warning: start cluster - {0}".format(err)) - -- pcsd_data = {'nodes': [node0]} -- utils.run_pcsdcli('send_local_certs', pcsd_data) -- utils.run_pcsdcli('pcsd_restart_nodes', pcsd_data) -+ pcsd.pcsd_sync_certs([node0], exit_after_error=False) - else: - utils.err("Unable to update any nodes") - output, retval = utils.reloadCorosync() -diff --git a/pcs/pcsd.py b/pcs/pcsd.py -index 6002c1a..b1b6be6 100644 ---- a/pcs/pcsd.py -+++ b/pcs/pcsd.py -@@ -36,14 +36,15 @@ def pcsd_certkey(argv): - try: - with open(certfile, 'r') as myfile: - cert = myfile.read() -- except IOError as e: -- utils.err(e) -- -- try: - with open(keyfile, 'r') as myfile: - key = myfile.read() - except IOError as e: - utils.err(e) -+ errors = utils.verify_cert_key_pair(cert, key) -+ if errors: -+ for err in errors: -+ utils.err(err, False) -+ sys.exit(1) - - if not "--force" in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)): - utils.err("certificate and/or key already exists, your must use --force to overwrite") -@@ -70,39 +71,71 @@ def pcsd_certkey(argv): - - print "Certificate and key updated, you may need to restart pcsd (service pcsd restart) for new settings to take effect" - --def pcsd_sync_certs(argv): -- nodes = utils.getNodesFromCorosyncConf() -- pcsd_data = {'nodes': nodes} -- commands = [ -- { -- "command": "send_local_certs", -- "message": "Synchronizing pcsd certificates on nodes {0}.".format( -- ", ".join(nodes) -- ), -- }, -- { -- "command": "pcsd_restart_nodes", -- "message": "Restaring pcsd on the nodes in order to reload " -- + "the certificates." 
-- , -- }, -- ] -- for cmd in commands: -- error = '' -- print cmd["message"] -- output, retval = utils.run_pcsdcli(cmd["command"], pcsd_data) -- if retval == 0 and output['status'] == 'ok' and output['data']: -- try: -- if output['data']['status'] != 'ok' and output['data']['text']: -- error = output['data']['text'] -- except KeyError: -- error = 'Unable to communicate with pcsd' -- else: -- error = 'Unable to sync pcsd certificates' -- if error: -- # restart pcsd even if sync failed in order to reload -- # the certificates on nodes where it succeded -- utils.err(error, False) -+def pcsd_sync_certs(argv, exit_after_error=True): -+ error = False -+ nodes_sync = argv if argv else utils.getNodesFromCorosyncConf() -+ nodes_restart = [] -+ -+ print("Synchronizing pcsd certificates on nodes {0}...".format( -+ ", ".join(nodes_sync) -+ )) -+ pcsd_data = { -+ "nodes": nodes_sync, -+ } -+ output, retval = utils.run_pcsdcli("send_local_certs", pcsd_data) -+ if retval == 0 and output["status"] == "ok" and output["data"]: -+ try: -+ sync_result = output["data"] -+ if sync_result["node_status"]: -+ for node, status in sync_result["node_status"].items(): -+ print("{0}: {1}".format(node, status["text"])) -+ if status["status"] == "ok": -+ nodes_restart.append(node) -+ else: -+ error = True -+ if sync_result["status"] != "ok": -+ error = True -+ utils.err(sync_result["text"], False) -+ if error and not nodes_restart: -+ if exit_after_error: -+ sys.exit(1) -+ else: -+ return -+ print -+ except (KeyError, AttributeError): -+ utils.err("Unable to communicate with pcsd", exit_after_error) -+ return -+ else: -+ utils.err("Unable to sync pcsd certificates", exit_after_error) -+ return -+ -+ print("Restaring pcsd on the nodes in order to reload the certificates...") -+ pcsd_data = { -+ "nodes": nodes_restart, -+ } -+ output, retval = utils.run_pcsdcli("pcsd_restart_nodes", pcsd_data) -+ if retval == 0 and output["status"] == "ok" and output["data"]: -+ try: -+ restart_result = 
output["data"] -+ if restart_result["node_status"]: -+ for node, status in restart_result["node_status"].items(): -+ print("{0}: {1}".format(node, status["text"])) -+ if status["status"] != "ok": -+ error = True -+ if restart_result["status"] != "ok": -+ error = True -+ utils.err(restart_result["text"], False) -+ if error: -+ if exit_after_error: -+ sys.exit(1) -+ else: -+ return -+ except (KeyError, AttributeError): -+ utils.err("Unable to communicate with pcsd", exit_after_error) -+ return -+ else: -+ utils.err("Unable to restart pcsd", exit_after_error) -+ return - - def pcsd_clear_auth(argv): - output = [] -diff --git a/pcs/utils.py b/pcs/utils.py -index 761723b..c91b50e 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -1880,6 +1880,35 @@ def is_iso8601_date(var): - output, retVal = run(["iso8601", "-d", var]) - return retVal == 0 - -+def verify_cert_key_pair(cert, key): -+ errors = [] -+ cert_modulus = "" -+ key_modulus = "" -+ -+ output, retval = run( -+ ["/usr/bin/openssl", "x509", "-modulus", "-noout"], -+ string_for_stdin=cert -+ ) -+ if retval != 0: -+ errors.append("Invalid certificate: {0}".format(output.strip())) -+ else: -+ cert_modulus = output.strip() -+ -+ output, retval = run( -+ ["/usr/bin/openssl", "rsa", "-modulus", "-noout"], -+ string_for_stdin=key -+ ) -+ if retval != 0: -+ errors.append("Invalid key: {0}".format(output.strip())) -+ else: -+ key_modulus = output.strip() -+ -+ if not errors and cert_modulus and key_modulus: -+ if cert_modulus != key_modulus: -+ errors.append("Certificate does not match the key") -+ -+ return errors -+ - # Does pacemaker consider a variable as true in cib? 
- # See crm_is_true in pacemaker/lib/common/utils.c - def is_cib_true(var): -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 1cddca8..37f6b83 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -1215,29 +1215,84 @@ def send_local_configs_to_nodes( - end - - def send_local_certs_to_nodes(session, nodes) -- data = { -- 'ssl_cert' => File.read(CRT_FILE), -- 'ssl_key' => File.read(KEY_FILE), -- 'cookie_secret' => File.read(COOKIE_FILE), -- } -+ begin -+ data = { -+ 'ssl_cert' => File.read(CRT_FILE), -+ 'ssl_key' => File.read(KEY_FILE), -+ 'cookie_secret' => File.read(COOKIE_FILE), -+ } -+ rescue => e -+ return { -+ 'status' => 'error', -+ 'text' => "Unable to read certificates: #{e}", -+ 'node_status' => {}, -+ } -+ end -+ -+ crt_errors = verify_cert_key_pair(data['ssl_cert'], data['ssl_key']) -+ if crt_errors and not crt_errors.empty? -+ return { -+ 'status' => 'error', -+ 'text' => "Invalid certificate and/or key: #{crt_errors.join}", -+ 'node_status' => {}, -+ } -+ end -+ secret_errors = verify_cookie_secret(data['cookie_secret']) -+ if secret_errors and not secret_errors.empty? -+ return { -+ 'status' => 'error', -+ 'text' => "Invalid cookie secret: #{secret_errors.join}", -+ 'node_status' => {}, -+ } -+ end -+ - node_response = {} - threads = [] - nodes.each { |node| - threads << Thread.new { -- code, _ = send_request_with_token(session, node, '/set_certs', true, data) -- node_response[node] = 200 == code ? 
'ok' : 'error' -+ code, response = send_request_with_token( -+ session, node, '/set_certs', true, data -+ ) -+ node_response[node] = [code, response] - } - } - threads.each { |t| t.join } - - node_error = [] -+ node_status = {} - node_response.each { |node, response| -- node_error << node if response != 'ok' -+ if response[0] == 200 -+ node_status[node] = { -+ 'status' => 'ok', -+ 'text' => 'Success', -+ } -+ else -+ text = response[1] -+ if response[0] == 401 -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ elsif response[0] == 400 -+ begin -+ parsed_response = JSON.parse(response[1], {:symbolize_names => true}) -+ if parsed_response[:noresponse] -+ text = "Unable to connect" -+ elsif parsed_response[:notoken] or parsed_response[:notauthorized] -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ end -+ rescue JSON::ParserError -+ end -+ end -+ node_status[node] = { -+ 'status' => 'error', -+ 'text' => text -+ } -+ node_error << node -+ end - } - return { - 'status' => node_error.empty?() ? 'ok' : 'error', - 'text' => node_error.empty?() ? 'Success' : \ - "Unable to save pcsd certificates to nodes: #{node_error.join(', ')}", -+ 'node_status' => node_status, - } - end - -@@ -1246,20 +1301,49 @@ def pcsd_restart_nodes(session, nodes) - threads = [] - nodes.each { |node| - threads << Thread.new { -- code, _ = send_request_with_token(session, node, '/pcsd_restart', true) -- node_response[node] = 200 == code ? 
'ok' : 'error' -+ code, response = send_request_with_token( -+ session, node, '/pcsd_restart', true -+ ) -+ node_response[node] = [code, response] - } - } - threads.each { |t| t.join } - - node_error = [] -+ node_status = {} - node_response.each { |node, response| -- node_error << node if response != 'ok' -+ if response[0] == 200 -+ node_status[node] = { -+ 'status' => 'ok', -+ 'text' => 'Success', -+ } -+ else -+ text = response[1] -+ if response[0] == 401 -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ elsif response[0] == 400 -+ begin -+ parsed_response = JSON.parse(response[1], {:symbolize_names => true}) -+ if parsed_response[:noresponse] -+ text = "Unable to connect" -+ elsif parsed_response[:notoken] or parsed_response[:notauthorized] -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ end -+ rescue JSON::ParserError -+ end -+ end -+ node_status[node] = { -+ 'status' => 'error', -+ 'text' => text -+ } -+ node_error << node -+ end - } - return { - 'status' => node_error.empty?() ? 'ok' : 'error', - 'text' => node_error.empty?() ? 'Success' : \ - "Unable to restart pcsd on nodes: #{node_error.join(', ')}", -+ 'node_status' => node_status, - } - end - -@@ -1280,6 +1364,53 @@ def write_file_lock(path, perm, data) - end - end - -+def verify_cert_key_pair(cert, key) -+ errors = [] -+ cert_modulus = nil -+ key_modulus = nil -+ -+ stdout, stderr, retval = run_cmd_options( -+ PCSAuth.getSuperuserSession(), -+ { -+ 'stdin' => cert, -+ }, -+ '/usr/bin/openssl', 'x509', '-modulus', '-noout' -+ ) -+ if retval != 0 -+ errors << "Invalid certificate: #{stderr.join}" -+ else -+ cert_modulus = stdout.join.strip -+ end -+ -+ stdout, stderr, retval = run_cmd_options( -+ PCSAuth.getSuperuserSession(), -+ { -+ 'stdin' => key, -+ }, -+ '/usr/bin/openssl', 'rsa', '-modulus', '-noout' -+ ) -+ if retval != 0 -+ errors << "Invalid key: #{stderr.join}" -+ else -+ key_modulus = stdout.join.strip -+ end -+ -+ if errors.empty? 
and cert_modulus and key_modulus -+ if cert_modulus != key_modulus -+ errors << 'Certificate does not match the key' -+ end -+ end -+ -+ return errors -+end -+ -+def verify_cookie_secret(secret) -+ if secret.empty? -+ return ['Cookie secret is empty'] -+ end -+ return [] -+end -+ - def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - node_map = {} - forbidden_nodes = {} -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index 1f26fe5..da47fb2 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -25,10 +25,20 @@ Dir["wizards/*.rb"].each {|file| require file} - - use Rack::CommonLogger - -+def generate_cookie_secret -+ return SecureRandom.hex(30) -+end -+ - begin - secret = File.read(COOKIE_FILE) -+ secret_errors = verify_cookie_secret(secret) -+ if secret_errors and not secret_errors.empty? -+ secret_errors.each { |err| $logger.error err } -+ $logger.error "Invalid cookie secret, using temporary one" -+ secret = generate_cookie_secret() -+ end - rescue Errno::ENOENT -- secret = SecureRandom.hex(30) -+ secret = generate_cookie_secret() - File.open(COOKIE_FILE, 'w', 0700) {|f| f.write(secret)} - end - -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 4655756..22af38a 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -584,15 +584,19 @@ def set_certs(params, request, session) - return [400, 'cannot save ssl key without ssl certificate'] - end - if !ssl_cert.empty? and !ssl_key.empty? -+ ssl_errors = verify_cert_key_pair(ssl_cert, ssl_key) -+ if ssl_errors and !ssl_errors.empty? 
-+ return [400, ssl_errors.join] -+ end - begin - write_file_lock(CRT_FILE, 0700, ssl_cert) - write_file_lock(KEY_FILE, 0700, ssl_key) -- rescue -+ rescue => e - # clean the files if we ended in the middle - # the files will be regenerated on next pcsd start - FileUtils.rm(CRT_FILE, {:force => true}) - FileUtils.rm(KEY_FILE, {:force => true}) -- return [400, 'cannot save ssl files'] -+ return [400, "cannot save ssl files: #{e}"] - end - end - -@@ -601,8 +605,8 @@ def set_certs(params, request, session) - if !cookie_secret.empty? - begin - write_file_lock(COOKIE_FILE, 0700, cookie_secret) -- rescue -- return [400, 'cannot save cookie secret'] -+ rescue => e -+ return [400, "cannot save cookie secret: #{e}"] - end - end - end -diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb -index 02372f6..e948aef 100644 ---- a/pcsd/ssl.rb -+++ b/pcsd/ssl.rb -@@ -5,10 +5,12 @@ require 'openssl' - require 'rack' - - require 'bootstrap.rb' -+require 'pcs.rb' - - server_name = WEBrick::Utils::getservername -+$logger = configure_logger('/var/log/pcsd/pcsd.log') - --if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) -+def generate_cert_key_pair(server_name) - name = "/C=US/ST=MN/L=Minneapolis/O=pcsd/OU=pcsd/CN=#{server_name}" - ca = OpenSSL::X509::Name.parse(name) - key = OpenSSL::PKey::RSA.new(2048) -@@ -21,9 +23,27 @@ if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) - crt.not_before = Time.now - crt.not_after = Time.now + 10 * 365 * 24 * 60 * 60 # 10 year - crt.sign(key, OpenSSL::Digest::SHA256.new) -+ return crt, key -+end - -+if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) -+ crt, key = generate_cert_key_pair(server_name) - File.open(CRT_FILE, 'w',0700) {|f| f.write(crt)} - File.open(KEY_FILE, 'w',0700) {|f| f.write(key)} -+else -+ crt, key = nil, nil -+ begin -+ crt = File.read(CRT_FILE) -+ key = File.read(KEY_FILE) -+ rescue => e -+ $logger.error "Unable to read certificate or key: #{e}" -+ end -+ crt_errors = verify_cert_key_pair(crt, key) -+ if crt_errors 
and not crt_errors.empty? -+ crt_errors.each { |err| $logger.error err } -+ $logger.error "Invalid certificate and/or key, using temporary ones" -+ crt, key = generate_cert_key_pair(server_name) -+ end - end - - webrick_options = { -@@ -32,8 +52,8 @@ webrick_options = { - :Host => '::', - :SSLEnable => true, - :SSLVerifyClient => OpenSSL::SSL::VERIFY_NONE, -- :SSLCertificate => OpenSSL::X509::Certificate.new(File.open(CRT_FILE).read), -- :SSLPrivateKey => OpenSSL::PKey::RSA.new(File.open(KEY_FILE).read()), -+ :SSLCertificate => OpenSSL::X509::Certificate.new(crt), -+ :SSLPrivateKey => OpenSSL::PKey::RSA.new(key), - :SSLCertName => [[ "CN", server_name ]], - :SSLOptions => OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3, - } --- -1.9.1 - diff --git a/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch b/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch new file mode 100644 index 0000000..4f6eaaf --- /dev/null +++ b/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch @@ -0,0 +1,10043 @@ +From db8643c4489274faee0bba008846a63c2ab63f46 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Wed, 15 Jun 2016 14:52:39 +0200 +Subject: [PATCH] bz1158805-01-add support for qdevice-qnetd provided by + corosync + +--- + pcs/cli/common/lib_wrapper.py | 10 + + pcs/cluster.py | 119 +- + pcs/common/report_codes.py | 31 +- + pcs/lib/commands/qdevice.py | 88 +- + pcs/lib/commands/quorum.py | 217 +- + pcs/lib/corosync/config_facade.py | 98 +- + pcs/lib/corosync/live.py | 15 + + pcs/lib/corosync/qdevice_client.py | 93 + + pcs/lib/corosync/qdevice_net.py | 314 ++- + pcs/lib/env.py | 11 +- + pcs/lib/errors.py | 6 +- + pcs/lib/external.py | 44 +- + pcs/lib/nodes_task.py | 69 +- + pcs/lib/reports.py | 225 +- + pcs/pcs.8 | 27 +- + pcs/qdevice.py | 71 + + pcs/quorum.py | 34 +- + pcs/settings_default.py | 6 +- + pcs/test/resources/qdevice-certs/qnetd-cacert.crt | 1 + + pcs/test/test_lib_commands_qdevice.py | 255 ++ + 
pcs/test/test_lib_commands_quorum.py | 1109 ++++++++- + pcs/test/test_lib_corosync_config_facade.py | 367 ++- + pcs/test/test_lib_corosync_live.py | 62 +- + pcs/test/test_lib_corosync_qdevice_client.py | 60 + + pcs/test/test_lib_corosync_qdevice_net.py | 965 +++++++- + pcs/test/test_lib_env.py | 142 +- + pcs/test/test_lib_external.py | 126 +- + pcs/test/test_lib_nodes_task.py | 168 +- + pcs/test/test_quorum.py | 9 +- + pcs/test/test_utils.py | 2628 +++++++++++---------- + pcs/usage.py | 53 +- + pcs/utils.py | 147 +- + pcsd/pcs.rb | 17 + + pcsd/remote.rb | 163 +- + pcsd/settings.rb | 6 + + pcsd/settings.rb.debian | 10 +- + 36 files changed, 6170 insertions(+), 1596 deletions(-) + create mode 100644 pcs/lib/corosync/qdevice_client.py + create mode 100644 pcs/test/resources/qdevice-certs/qnetd-cacert.crt + create mode 100644 pcs/test/test_lib_corosync_qdevice_client.py + +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 2ba5602..2dd5810 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -117,6 +117,8 @@ def load_module(env, middleware_factory, name): + "get_config": quorum.get_config, + "remove_device": quorum.remove_device, + "set_options": quorum.set_options, ++ "status": quorum.status_text, ++ "status_device": quorum.status_device_text, + "update_device": quorum.update_device, + } + ) +@@ -125,6 +127,7 @@ def load_module(env, middleware_factory, name): + env, + middleware.build(), + { ++ "status": qdevice.qdevice_status_text, + "setup": qdevice.qdevice_setup, + "destroy": qdevice.qdevice_destroy, + "start": qdevice.qdevice_start, +@@ -132,6 +135,13 @@ def load_module(env, middleware_factory, name): + "kill": qdevice.qdevice_kill, + "enable": qdevice.qdevice_enable, + "disable": qdevice.qdevice_disable, ++ # following commands are internal use only, called from pcsd ++ "client_net_setup": qdevice.client_net_setup, ++ "client_net_import_certificate": ++ qdevice.client_net_import_certificate, ++ 
"client_net_destroy": qdevice.client_net_destroy, ++ "sign_net_cert_request": ++ qdevice.qdevice_net_sign_certificate_request, + } + ) + if name == "sbd": +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 002b5c5..988ab75 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -36,23 +36,29 @@ from pcs import ( + ) + from pcs.utils import parallel_for_nodes + from pcs.common import report_codes ++from pcs.cli.common.reports import process_library_reports + from pcs.lib import ( + pacemaker as lib_pacemaker, + sbd as lib_sbd, + reports as lib_reports, + ) +-from pcs.lib.tools import environment_file_to_dict ++from pcs.lib.commands.quorum import _add_device_model_net ++from pcs.lib.corosync import ( ++ config_parser as corosync_conf_utils, ++ qdevice_net, ++) ++from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade ++from pcs.lib.errors import ( ++ LibraryError, ++ ReportItemSeverity, ++) + from pcs.lib.external import ( + disable_service, + NodeCommunicationException, + node_communicator_exception_to_report_item, + ) + from pcs.lib.node import NodeAddresses +-from pcs.lib.errors import ( +- LibraryError, +- ReportItemSeverity, +-) +-from pcs.lib.corosync import config_parser as corosync_conf_utils ++from pcs.lib.tools import environment_file_to_dict + + def cluster_cmd(argv): + if len(argv) == 0: +@@ -288,7 +294,7 @@ def cluster_setup(argv): + ) + if udpu_rrp and "rrp_mode" not in options["transport_options"]: + options["transport_options"]["rrp_mode"] = "passive" +- utils.process_library_reports(messages) ++ process_library_reports(messages) + + # prepare config file + if is_rhel6: +@@ -306,7 +312,7 @@ def cluster_setup(argv): + options["totem_options"], + options["quorum_options"] + ) +- utils.process_library_reports(messages) ++ process_library_reports(messages) + + # setup on the local node + if "--local" in utils.pcs_options: +@@ -870,6 +876,7 @@ def start_cluster(argv): + return + + print("Starting Cluster...") ++ service_list = [] + 
if utils.is_rhel6(): + # Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0 + retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]') +@@ -882,14 +889,15 @@ def start_cluster(argv): + print(output) + utils.err("unable to start cman") + else: +- output, retval = utils.run(["service", "corosync","start"]) ++ service_list.append("corosync") ++ if utils.need_to_handle_qdevice_service(): ++ service_list.append("corosync-qdevice") ++ service_list.append("pacemaker") ++ for service in service_list: ++ output, retval = utils.run(["service", service, "start"]) + if retval != 0: + print(output) +- utils.err("unable to start corosync") +- output, retval = utils.run(["service", "pacemaker", "start"]) +- if retval != 0: +- print(output) +- utils.err("unable to start pacemaker") ++ utils.err("unable to start {0}".format(service)) + if wait: + wait_for_nodes_started([], wait_timeout) + +@@ -1035,14 +1043,20 @@ def enable_cluster(argv): + enable_cluster_nodes(argv) + return + +- utils.enableServices() ++ try: ++ utils.enableServices() ++ except LibraryError as e: ++ process_library_reports(e.args) + + def disable_cluster(argv): + if len(argv) > 0: + disable_cluster_nodes(argv) + return + +- utils.disableServices() ++ try: ++ utils.disableServices() ++ except LibraryError as e: ++ process_library_reports(e.args) + + def enable_cluster_all(): + enable_cluster_nodes(utils.getNodesFromCorosyncConf()) +@@ -1132,13 +1146,18 @@ def stop_cluster_corosync(): + utils.err("unable to stop cman") + else: + print("Stopping Cluster (corosync)...") +- output, retval = utils.run(["service", "corosync","stop"]) +- if retval != 0: +- print(output) +- utils.err("unable to stop corosync") ++ service_list = [] ++ if utils.need_to_handle_qdevice_service(): ++ service_list.append("corosync-qdevice") ++ service_list.append("corosync") ++ for service in service_list: ++ output, retval = utils.run(["service", service, "stop"]) ++ if retval != 0: ++ 
print(output) ++ utils.err("unable to stop {0}".format(service)) + + def kill_cluster(argv): +- daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync"] ++ daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"] + dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons) + # if dummy_retval != 0: + # print "Error: unable to execute killall -9" +@@ -1321,19 +1340,16 @@ def cluster_node(argv): + "cluster is not configured for RRP, " + "you must not specify ring 1 address for the node" + ) +- utils.check_qdevice_algorithm_and_running_cluster( +- utils.getCorosyncConf(), add=True +- ) + corosync_conf = None + (canAdd, error) = utils.canAddNodeToCluster(node0) + if not canAdd: + utils.err("Unable to add '%s' to cluster: %s" % (node0, error)) + ++ lib_env = utils.get_lib_env() ++ report_processor = lib_env.report_processor ++ node_communicator = lib_env.node_communicator() ++ node_addr = NodeAddresses(node0, node1) + try: +- node_addr = NodeAddresses(node0, node1) +- lib_env = utils.get_lib_env() +- report_processor = lib_env.report_processor +- node_communicator = lib_env.node_communicator() + if lib_sbd.is_sbd_enabled(utils.cmd_runner()): + if "--watchdog" not in utils.pcs_options: + watchdog = settings.sbd_watchdog_default +@@ -1367,9 +1383,9 @@ def cluster_node(argv): + report_processor, node_communicator, node_addr + ) + except LibraryError as e: +- utils.process_library_reports(e.args) ++ process_library_reports(e.args) + except NodeCommunicationException as e: +- utils.process_library_reports( ++ process_library_reports( + [node_communicator_exception_to_report_item(e)] + ) + +@@ -1383,6 +1399,8 @@ def cluster_node(argv): + else: + print("%s: Corosync updated" % my_node) + corosync_conf = output ++ # corosync.conf must be reloaded before the new node is started ++ output, retval = utils.reloadCorosync() + if corosync_conf != None: + # send local cluster pcsd 
configs to the new node + # may be used for sending corosync config as well in future +@@ -1406,6 +1424,25 @@ def cluster_node(argv): + except: + utils.err('Unable to communicate with pcsd') + ++ # set qdevice-net certificates if needed ++ if not utils.is_rhel6(): ++ try: ++ conf_facade = corosync_conf_facade.from_string( ++ corosync_conf ++ ) ++ qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings() ++ if qdevice_model == "net": ++ _add_device_model_net( ++ lib_env, ++ qdevice_model_options["host"], ++ conf_facade.get_cluster_name(), ++ [node_addr], ++ skip_offline_nodes=False ++ ) ++ except LibraryError as e: ++ process_library_reports(e.args) ++ ++ print("Setting up corosync...") + utils.setCorosyncConfig(node0, corosync_conf) + if "--enable" in utils.pcs_options: + retval, err = utils.enableCluster(node0) +@@ -1421,7 +1458,6 @@ def cluster_node(argv): + pcsd.pcsd_sync_certs([node0], exit_after_error=False) + else: + utils.err("Unable to update any nodes") +- output, retval = utils.reloadCorosync() + if utils.is_cman_with_udpu_transport(): + print("Warning: Using udpu transport on a CMAN cluster, " + + "cluster restart is required to apply node addition") +@@ -1433,9 +1469,6 @@ def cluster_node(argv): + utils.err( + "node '%s' does not appear to exist in configuration" % node0 + ) +- utils.check_qdevice_algorithm_and_running_cluster( +- utils.getCorosyncConf(), add=False +- ) + if "--force" not in utils.pcs_options: + retval, data = utils.get_remote_quorumtool_output(node0) + if retval != 0: +@@ -1697,10 +1730,18 @@ def cluster_destroy(argv): + else: + print("Shutting down pacemaker/corosync services...") + os.system("service pacemaker stop") ++ # returns error if qdevice is not running, it is safe to ignore it ++ # since we want it not to be running ++ os.system("service corosync-qdevice stop") + os.system("service corosync stop") + print("Killing any remaining services...") +- os.system("killall -q -9 corosync aisexec heartbeat 
pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld") +- utils.disableServices() ++ os.system("killall -q -9 corosync corosync-qdevice aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld") ++ try: ++ utils.disableServices() ++ except: ++ # previously errors were suppressed in here, let's keep it that way ++ # for now ++ pass + try: + disable_service(utils.cmd_runner(), "sbd") + except: +@@ -1716,6 +1757,12 @@ def cluster_destroy(argv): + "pe*.bz2","cib.*"] + for name in state_files: + os.system("find /var/lib -name '"+name+"' -exec rm -f \{\} \;") ++ try: ++ qdevice_net.client_destroy() ++ except: ++ # errors from deleting other files are suppressed as well ++ # we do not want to fail if qdevice was not set up ++ pass + + def cluster_verify(argv): + nofilename = True +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index bda982a..afe0554 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -45,6 +45,8 @@ COROSYNC_CONFIG_RELOAD_ERROR = "COROSYNC_CONFIG_RELOAD_ERROR" + COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED" + COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR" + COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE" ++COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE" ++COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR" + COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE" + CRM_MON_ERROR = "CRM_MON_ERROR" + DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" +@@ -62,11 +64,11 @@ INVALID_SCORE = "INVALID_SCORE" + INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE" + MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS" + NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL" +-NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR", 
+-NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED", +-NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED", +-NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT", +-NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND", ++NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR" ++NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED" ++NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED" ++NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT" ++NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND" + NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED" + NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED" + NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED" +@@ -74,16 +76,25 @@ NODE_NOT_FOUND = "NODE_NOT_FOUND" + NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH' + OMITTING_NODE = "OMITTING_NODE" + PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND" +-PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE", +-PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF", +-PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE", ++PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE" ++PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF" ++PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE" + QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED" + QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED" ++QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = "QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE" 
++QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED = "QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED" ++QDEVICE_CERTIFICATE_REMOVAL_STARTED = "QDEVICE_CERTIFICATE_REMOVAL_STARTED" ++QDEVICE_CERTIFICATE_REMOVED_FROM_NODE = "QDEVICE_CERTIFICATE_REMOVED_FROM_NODE" ++QDEVICE_CERTIFICATE_IMPORT_ERROR = "QDEVICE_CERTIFICATE_IMPORT_ERROR" ++QDEVICE_CERTIFICATE_SIGN_ERROR = "QDEVICE_CERTIFICATE_SIGN_ERROR" + QDEVICE_DESTROY_ERROR = "QDEVICE_DESTROY_ERROR" + QDEVICE_DESTROY_SUCCESS = "QDEVICE_DESTROY_SUCCESS" ++QDEVICE_GET_STATUS_ERROR = "QDEVICE_GET_STATUS_ERROR" + QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR" + QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS" + QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED" ++QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED" ++QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED" + QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED" + REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING" + RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR" +@@ -106,12 +117,16 @@ SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED" + SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED" + SBD_NOT_ENABLED = "SBD_NOT_ENABLED" + SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR" ++SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED" + SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS" + SERVICE_ENABLE_ERROR = "SERVICE_ENABLE_ERROR" ++SERVICE_ENABLE_STARTED = "SERVICE_ENABLE_STARTED" ++SERVICE_ENABLE_SKIPPED = "SERVICE_ENABLE_SKIPPED" + SERVICE_ENABLE_SUCCESS = "SERVICE_ENABLE_SUCCESS" + SERVICE_KILL_ERROR = "SERVICE_KILL_ERROR" + SERVICE_KILL_SUCCESS = "SERVICE_KILL_SUCCESS" + SERVICE_START_ERROR = "SERVICE_START_ERROR" ++SERVICE_START_SKIPPED = "SERVICE_START_SKIPPED" + SERVICE_START_STARTED = "SERVICE_START_STARTED" + SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS" + SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR" +diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py +index c300a4c..1d1d85f 100644 +--- 
a/pcs/lib/commands/qdevice.py ++++ b/pcs/lib/commands/qdevice.py +@@ -5,6 +5,9 @@ from __future__ import ( + unicode_literals, + ) + ++import base64 ++import binascii ++ + from pcs.lib import external, reports + from pcs.lib.corosync import qdevice_net + from pcs.lib.errors import LibraryError +@@ -31,7 +34,7 @@ def qdevice_setup(lib_env, model, enable, start): + def qdevice_destroy(lib_env, model): + """ + Stop and disable qdevice on local host and remove its configuration +- string model qdevice model to initialize ++ string model qdevice model to destroy + """ + _ensure_not_cman(lib_env) + _check_model(model) +@@ -40,6 +43,22 @@ def qdevice_destroy(lib_env, model): + qdevice_net.qdevice_destroy() + lib_env.report_processor.process(reports.qdevice_destroy_success(model)) + ++def qdevice_status_text(lib_env, model, verbose=False, cluster=None): ++ """ ++ Get runtime status of a quorum device in plain text ++ string model qdevice model to query ++ bool verbose get more detailed output ++ string cluster show information only about specified cluster ++ """ ++ _ensure_not_cman(lib_env) ++ _check_model(model) ++ runner = lib_env.cmd_runner() ++ return ( ++ qdevice_net.qdevice_status_generic_text(runner, verbose) ++ + ++ qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose) ++ ) ++ + def qdevice_enable(lib_env, model): + """ + make qdevice start automatically on boot on local host +@@ -80,6 +99,73 @@ def qdevice_kill(lib_env, model): + _check_model(model) + _service_kill(lib_env, qdevice_net.qdevice_kill) + ++def qdevice_net_sign_certificate_request( ++ lib_env, certificate_request, cluster_name ++): ++ """ ++ Sign node certificate request by qnetd CA ++ string certificate_request base64 encoded certificate request ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ _ensure_not_cman(lib_env) ++ try: ++ certificate_request_data = base64.b64decode(certificate_request) ++ except (TypeError, binascii.Error): ++ raise 
LibraryError(reports.invalid_option_value( ++ "qnetd certificate request", ++ certificate_request, ++ ["base64 encoded certificate"] ++ )) ++ return base64.b64encode( ++ qdevice_net.qdevice_sign_certificate_request( ++ lib_env.cmd_runner(), ++ certificate_request_data, ++ cluster_name ++ ) ++ ) ++ ++def client_net_setup(lib_env, ca_certificate): ++ """ ++ Intialize qdevice net client on local host ++ ca_certificate base64 encoded qnetd CA certificate ++ """ ++ _ensure_not_cman(lib_env) ++ try: ++ ca_certificate_data = base64.b64decode(ca_certificate) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_option_value( ++ "qnetd CA certificate", ++ ca_certificate, ++ ["base64 encoded certificate"] ++ )) ++ qdevice_net.client_setup(lib_env.cmd_runner(), ca_certificate_data) ++ ++def client_net_import_certificate(lib_env, certificate): ++ """ ++ Import qnetd client certificate to local node certificate storage ++ certificate base64 encoded qnetd client certificate ++ """ ++ _ensure_not_cman(lib_env) ++ try: ++ certificate_data = base64.b64decode(certificate) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_option_value( ++ "qnetd client certificate", ++ certificate, ++ ["base64 encoded certificate"] ++ )) ++ qdevice_net.client_import_certificate_and_key( ++ lib_env.cmd_runner(), ++ certificate_data ++ ) ++ ++def client_net_destroy(lib_env): ++ """ ++ delete qdevice client config files on local host ++ """ ++ _ensure_not_cman(lib_env) ++ qdevice_net.client_destroy() ++ + def _ensure_not_cman(lib_env): + if lib_env.is_cman_cluster: + raise LibraryError(reports.cman_unsupported_command()) +diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py +index 1ee5411..aa00bbd 100644 +--- a/pcs/lib/commands/quorum.py ++++ b/pcs/lib/commands/quorum.py +@@ -5,9 +5,18 @@ from __future__ import ( + unicode_literals, + ) + +- + from pcs.lib import reports + from pcs.lib.errors import LibraryError ++from pcs.lib.corosync 
import ( ++ live as corosync_live, ++ qdevice_net, ++ qdevice_client ++) ++from pcs.lib.external import ( ++ NodeCommunicationException, ++ node_communicator_exception_to_report_item, ++ parallel_nodes_communication_helper, ++) + + + def get_config(lib_env): +@@ -42,6 +51,21 @@ def set_options(lib_env, options, skip_offline_nodes=False): + cfg.set_quorum_options(lib_env.report_processor, options) + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + ++def status_text(lib_env): ++ """ ++ Get quorum runtime status in plain text ++ """ ++ __ensure_not_cman(lib_env) ++ return corosync_live.get_quorum_status_text(lib_env.cmd_runner()) ++ ++def status_device_text(lib_env, verbose=False): ++ """ ++ Get quorum device client runtime status in plain text ++ bool verbose get more detailed output ++ """ ++ __ensure_not_cman(lib_env) ++ return qdevice_client.get_status_text(lib_env.cmd_runner(), verbose) ++ + def add_device( + lib_env, model, model_options, generic_options, force_model=False, + force_options=False, skip_offline_nodes=False +@@ -58,6 +82,8 @@ def add_device( + __ensure_not_cman(lib_env) + + cfg = lib_env.get_corosync_conf() ++ # Try adding qdevice to corosync.conf. This validates all the options and ++ # makes sure qdevice is not defined in corosync.conf yet. + cfg.add_quorum_device( + lib_env.report_processor, + model, +@@ -66,9 +92,131 @@ def add_device( + force_model, + force_options + ) +- # TODO validation, verification, certificates, etc. ++ ++ # First setup certificates for qdevice, then send corosync.conf to nodes. ++ # If anything fails, nodes will not have corosync.conf with qdevice in it, ++ # so there is no effect on the cluster. 
++ if lib_env.is_corosync_conf_live: ++ # do model specific configuration ++ # if model is not known to pcs and was forced, do not configure antyhing ++ # else but corosync.conf, as we do not know what to do anyways ++ if model == "net": ++ _add_device_model_net( ++ lib_env, ++ # we are sure it's there, it was validated in add_quorum_device ++ model_options["host"], ++ cfg.get_cluster_name(), ++ cfg.get_nodes(), ++ skip_offline_nodes ++ ) ++ ++ lib_env.report_processor.process( ++ reports.service_enable_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_enable, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ ++ # everything set up, it's safe to tell the nodes to use qdevice + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + ++ # Now, when corosync.conf has been reloaded, we can start qdevice service. 
++ if lib_env.is_corosync_conf_live: ++ lib_env.report_processor.process( ++ reports.service_start_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_start, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ ++def _add_device_model_net( ++ lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes ++): ++ """ ++ setup cluster nodes for using qdevice model net ++ string qnetd_host address of qdevice provider (qnetd host) ++ string cluster_name name of the cluster to which qdevice is being added ++ NodeAddressesList cluster_nodes list of cluster nodes addresses ++ bool skip_offline_nodes continue even if not all nodes are accessible ++ """ ++ communicator = lib_env.node_communicator() ++ runner = lib_env.cmd_runner() ++ reporter = lib_env.report_processor ++ ++ reporter.process( ++ reports.qdevice_certificate_distribution_started() ++ ) ++ # get qnetd CA certificate ++ try: ++ qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate( ++ communicator, ++ qnetd_host ++ ) ++ except NodeCommunicationException as e: ++ raise LibraryError( ++ node_communicator_exception_to_report_item(e) ++ ) ++ # init certificate storage on all nodes ++ parallel_nodes_communication_helper( ++ qdevice_net.remote_client_setup, ++ [ ++ ((communicator, node, qnetd_ca_cert), {}) ++ for node in cluster_nodes ++ ], ++ reporter, ++ skip_offline_nodes ++ ) ++ # create client certificate request ++ cert_request = qdevice_net.client_generate_certificate_request( ++ runner, ++ cluster_name ++ ) ++ # sign the request on qnetd host ++ try: ++ signed_certificate = qdevice_net.remote_sign_certificate_request( ++ communicator, ++ qnetd_host, ++ cert_request, ++ cluster_name ++ ) ++ except NodeCommunicationException as e: ++ raise LibraryError( ++ 
node_communicator_exception_to_report_item(e) ++ ) ++ # transform the signed certificate to pk12 format which can sent to nodes ++ pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate) ++ # distribute final certificate to nodes ++ def do_and_report(reporter, communicator, node, pk12): ++ qdevice_net.remote_client_import_certificate_and_key( ++ communicator, node, pk12 ++ ) ++ reporter.process( ++ reports.qdevice_certificate_accepted_by_node(node.label) ++ ) ++ parallel_nodes_communication_helper( ++ do_and_report, ++ [ ++ ((reporter, communicator, node, pk12), {}) ++ for node in cluster_nodes ++ ], ++ reporter, ++ skip_offline_nodes ++ ) ++ + def update_device( + lib_env, model_options, generic_options, force_options=False, + skip_offline_nodes=False +@@ -98,9 +246,74 @@ def remove_device(lib_env, skip_offline_nodes=False): + __ensure_not_cman(lib_env) + + cfg = lib_env.get_corosync_conf() ++ model, dummy_options, dummy_options = cfg.get_quorum_device_settings() + cfg.remove_quorum_device() + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + ++ if lib_env.is_corosync_conf_live: ++ # disable qdevice ++ lib_env.report_processor.process( ++ reports.service_disable_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_disable, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ # stop qdevice ++ lib_env.report_processor.process( ++ reports.service_stop_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_stop, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ # handle model specific configuration ++ if model == "net": ++ 
_remove_device_model_net( ++ lib_env, ++ cfg.get_nodes(), ++ skip_offline_nodes ++ ) ++ ++def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes): ++ """ ++ remove configuration used by qdevice model net ++ NodeAddressesList cluster_nodes list of cluster nodes addresses ++ bool skip_offline_nodes continue even if not all nodes are accessible ++ """ ++ reporter = lib_env.report_processor ++ communicator = lib_env.node_communicator() ++ ++ reporter.process( ++ reports.qdevice_certificate_removal_started() ++ ) ++ def do_and_report(reporter, communicator, node): ++ qdevice_net.remote_client_destroy(communicator, node) ++ reporter.process( ++ reports.qdevice_certificate_removed_from_node(node.label) ++ ) ++ parallel_nodes_communication_helper( ++ do_and_report, ++ [ ++ [(reporter, communicator, node), {}] ++ for node in cluster_nodes ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ + def __ensure_not_cman(lib_env): + if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster: + raise LibraryError(reports.cman_unsupported_command()) +diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py +index 5a486ca..600a89b 100644 +--- a/pcs/lib/corosync/config_facade.py ++++ b/pcs/lib/corosync/config_facade.py +@@ -22,6 +22,12 @@ class ConfigFacade(object): + "last_man_standing_window", + "wait_for_all", + ) ++ QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = ( ++ "auto_tie_breaker", ++ "last_man_standing", ++ "last_man_standing_window", ++ ) ++ + + @classmethod + def from_string(cls, config_string): +@@ -52,6 +58,8 @@ class ConfigFacade(object): + self._config = parsed_config + # set to True if changes cannot be applied on running cluster + self._need_stopped_cluster = False ++ # set to True if qdevice reload is required to apply changes ++ self._need_qdevice_reload = False + + @property + def config(self): +@@ -61,6 +69,17 @@ class ConfigFacade(object): + def need_stopped_cluster(self): + return self._need_stopped_cluster + 
++ @property ++ def need_qdevice_reload(self): ++ return self._need_qdevice_reload ++ ++ def get_cluster_name(self): ++ cluster_name = "" ++ for totem in self.config.get_sections("totem"): ++ for attrs in totem.get_attributes("cluster_name"): ++ cluster_name = attrs[1] ++ return cluster_name ++ + def get_nodes(self): + """ + Get all defined nodes +@@ -112,8 +131,9 @@ class ConfigFacade(object): + + def __validate_quorum_options(self, options): + report_items = [] ++ has_qdevice = self.has_quorum_device() ++ qdevice_incompatible_options = [] + for name, value in sorted(options.items()): +- + allowed_names = self.__class__.QUORUM_OPTIONS + if name not in allowed_names: + report_items.append( +@@ -124,6 +144,13 @@ class ConfigFacade(object): + if value == "": + continue + ++ if ( ++ has_qdevice ++ and ++ name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE ++ ): ++ qdevice_incompatible_options.append(name) ++ + if name == "last_man_standing_window": + if not value.isdigit(): + report_items.append(reports.invalid_option_value( +@@ -137,6 +164,13 @@ class ConfigFacade(object): + name, value, allowed_values + )) + ++ if qdevice_incompatible_options: ++ report_items.append( ++ reports.corosync_options_incompatible_with_qdevice( ++ qdevice_incompatible_options ++ ) ++ ) ++ + return report_items + + def has_quorum_device(self): +@@ -201,13 +235,13 @@ class ConfigFacade(object): + force=force_options + ) + ) ++ + # configuration cleanup +- remove_need_stopped_cluster = { +- "auto_tie_breaker": "", +- "last_man_standing": "", +- "last_man_standing_window": "", +- } +- need_stopped_cluster = False ++ remove_need_stopped_cluster = dict([ ++ (name, "") ++ for name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE ++ ]) ++ # remove old device settings + quorum_section_list = self.__ensure_section(self.config, "quorum") + for quorum in quorum_section_list: + for device in quorum.get_sections("device"): +@@ -218,13 +252,19 @@ class ConfigFacade(object): + and + 
value not in ["", "0"] + ): +- need_stopped_cluster = True ++ self._need_stopped_cluster = True ++ # remove conflicting quorum options + attrs_to_remove = { + "allow_downscale": "", + "two_node": "", + } + attrs_to_remove.update(remove_need_stopped_cluster) + self.__set_section_options(quorum_section_list, attrs_to_remove) ++ # remove nodes' votes ++ for nodelist in self.config.get_sections("nodelist"): ++ for node in nodelist.get_sections("node"): ++ node.del_attributes_by_name("quorum_votes") ++ + # add new configuration + quorum = quorum_section_list[-1] + new_device = config_parser.Section("device") +@@ -234,12 +274,9 @@ class ConfigFacade(object): + new_model = config_parser.Section(model) + self.__set_section_options([new_model], model_options) + new_device.add_section(new_model) ++ self.__update_qdevice_votes() + self.__update_two_node() + self.__remove_empty_sections(self.config) +- # update_two_node sets self._need_stopped_cluster when changing an +- # algorithm lms <-> 2nodelms. We don't care about that, it's not really +- # a change, as there was no qdevice before. So we override it. 
+- self._need_stopped_cluster = need_stopped_cluster + + def update_quorum_device( + self, report_processor, model_options, generic_options, +@@ -281,9 +318,10 @@ class ConfigFacade(object): + model_sections.extend(device.get_sections(model)) + self.__set_section_options(device_sections, generic_options) + self.__set_section_options(model_sections, model_options) ++ self.__update_qdevice_votes() + self.__update_two_node() + self.__remove_empty_sections(self.config) +- self._need_stopped_cluster = True ++ self._need_qdevice_reload = True + + def remove_quorum_device(self): + """ +@@ -369,7 +407,7 @@ class ConfigFacade(object): + continue + + if name == "algorithm": +- allowed_values = ("2nodelms", "ffsplit", "lms") ++ allowed_values = ("ffsplit", "lms") + if value not in allowed_values: + report_items.append(reports.invalid_option_value( + name, value, allowed_values, severity, forceable +@@ -461,19 +499,29 @@ class ConfigFacade(object): + else: + for quorum in self.config.get_sections("quorum"): + quorum.del_attributes_by_name("two_node") +- # update qdevice algorithm "lms" vs "2nodelms" ++ ++ def __update_qdevice_votes(self): ++ # ffsplit won't start if votes is missing or not set to 1 ++ # for other algorithms it's required not to put votes at all ++ model = None ++ algorithm = None ++ device_sections = [] + for quorum in self.config.get_sections("quorum"): + for device in quorum.get_sections("device"): +- for net in device.get_sections("net"): +- algorithm = None +- for dummy_name, value in net.get_attributes("algorithm"): +- algorithm = value +- if algorithm == "lms" and has_two_nodes: +- net.set_attribute("algorithm", "2nodelms") +- self._need_stopped_cluster = True +- elif algorithm == "2nodelms" and not has_two_nodes: +- net.set_attribute("algorithm", "lms") +- self._need_stopped_cluster = True ++ device_sections.append(device) ++ for dummy_name, value in device.get_attributes("model"): ++ model = value ++ for device in device_sections: ++ for model_section 
in device.get_sections(model): ++ for dummy_name, value in model_section.get_attributes( ++ "algorithm" ++ ): ++ algorithm = value ++ if model == "net": ++ if algorithm == "ffsplit": ++ self.__set_section_options(device_sections, {"votes": "1"}) ++ else: ++ self.__set_section_options(device_sections, {"votes": ""}) + + def __set_section_options(self, section_list, options): + for section in section_list[:-1]: +diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py +index 2446a46..4129aeb 100644 +--- a/pcs/lib/corosync/live.py ++++ b/pcs/lib/corosync/live.py +@@ -47,3 +47,18 @@ def reload_config(runner): + reports.corosync_config_reload_error(output.rstrip()) + ) + ++def get_quorum_status_text(runner): ++ """ ++ Get runtime quorum status from the local node ++ """ ++ output, retval = runner.run([ ++ os.path.join(settings.corosync_binaries, "corosync-quorumtool"), ++ "-p" ++ ]) ++ # retval is 0 on success if node is not in partition with quorum ++ # retval is 1 on error OR on success if node has quorum ++ if retval not in [0, 1]: ++ raise LibraryError( ++ reports.corosync_quorum_get_status_error(output) ++ ) ++ return output +diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py +new file mode 100644 +index 0000000..98fbb0e +--- /dev/null ++++ b/pcs/lib/corosync/qdevice_client.py +@@ -0,0 +1,93 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os.path ++ ++from pcs import settings ++from pcs.lib import reports ++from pcs.lib.errors import LibraryError ++ ++ ++def get_status_text(runner, verbose=False): ++ """ ++ Get quorum device client runtime status in plain text ++ bool verbose get more detailed output ++ """ ++ cmd = [ ++ os.path.join(settings.corosync_binaries, "corosync-qdevice-tool"), ++ "-s" ++ ] ++ if verbose: ++ cmd.append("-v") ++ output, retval = runner.run(cmd) ++ if retval != 0: ++ raise LibraryError( ++ 
reports.corosync_quorum_get_status_error(output) ++ ) ++ return output ++ ++def remote_client_enable(reporter, node_communicator, node): ++ """ ++ enable qdevice client service (corosync-qdevice) on a remote node ++ """ ++ response = node_communicator.call_node( ++ node, ++ "remote/qdevice_client_enable", ++ None ++ ) ++ if response == "corosync is not enabled, skipping": ++ reporter.process( ++ reports.service_enable_skipped( ++ "corosync-qdevice", ++ "corosync is not enabled", ++ node.label ++ ) ++ ) ++ else: ++ reporter.process( ++ reports.service_enable_success("corosync-qdevice", node.label) ++ ) ++ ++def remote_client_disable(reporter, node_communicator, node): ++ """ ++ disable qdevice client service (corosync-qdevice) on a remote node ++ """ ++ node_communicator.call_node(node, "remote/qdevice_client_disable", None) ++ reporter.process( ++ reports.service_disable_success("corosync-qdevice", node.label) ++ ) ++ ++def remote_client_start(reporter, node_communicator, node): ++ """ ++ start qdevice client service (corosync-qdevice) on a remote node ++ """ ++ response = node_communicator.call_node( ++ node, ++ "remote/qdevice_client_start", ++ None ++ ) ++ if response == "corosync is not running, skipping": ++ reporter.process( ++ reports.service_start_skipped( ++ "corosync-qdevice", ++ "corosync is not running", ++ node.label ++ ) ++ ) ++ else: ++ reporter.process( ++ reports.service_start_success("corosync-qdevice", node.label) ++ ) ++ ++def remote_client_stop(reporter, node_communicator, node): ++ """ ++ stop qdevice client service (corosync-qdevice) on a remote node ++ """ ++ node_communicator.call_node(node, "remote/qdevice_client_stop", None) ++ reporter.process( ++ reports.service_stop_success("corosync-qdevice", node.label) ++ ) +diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py +index 7479257..4054592 100644 +--- a/pcs/lib/corosync/qdevice_net.py ++++ b/pcs/lib/corosync/qdevice_net.py +@@ -5,8 +5,14 @@ from __future__ import 
( + unicode_literals, + ) + ++import base64 ++import binascii ++import functools ++import os + import os.path ++import re + import shutil ++import tempfile + + from pcs import settings + from pcs.lib import external, reports +@@ -15,6 +21,18 @@ from pcs.lib.errors import LibraryError + + __model = "net" + __service_name = "corosync-qnetd" ++__qnetd_certutil = os.path.join( ++ settings.corosync_qnet_binaries, ++ "corosync-qnetd-certutil" ++) ++__qnetd_tool = os.path.join( ++ settings.corosync_qnet_binaries, ++ "corosync-qnetd-tool" ++) ++__qdevice_certutil = os.path.join( ++ settings.corosync_binaries, ++ "corosync-qdevice-net-certutil" ++) + + def qdevice_setup(runner): + """ +@@ -24,25 +42,63 @@ def qdevice_setup(runner): + raise LibraryError(reports.qdevice_already_initialized(__model)) + + output, retval = runner.run([ +- os.path.join(settings.corosync_binaries, "corosync-qnetd-certutil"), +- "-i" ++ __qnetd_certutil, "-i" + ]) + if retval != 0: + raise LibraryError( + reports.qdevice_initialization_error(__model, output.rstrip()) + ) + ++def qdevice_initialized(): ++ """ ++ check if qdevice server certificate database has been initialized ++ """ ++ return os.path.exists(os.path.join( ++ settings.corosync_qdevice_net_server_certs_dir, ++ "cert8.db" ++ )) ++ + def qdevice_destroy(): + """ + delete qdevice configuration on local host + """ + try: +- shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir) ++ if qdevice_initialized(): ++ shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir) + except EnvironmentError as e: + raise LibraryError( + reports.qdevice_destroy_error(__model, e.strerror) + ) + ++def qdevice_status_generic_text(runner, verbose=False): ++ """ ++ get qdevice runtime status in plain text ++ bool verbose get more detailed output ++ """ ++ cmd = [__qnetd_tool, "-s"] ++ if verbose: ++ cmd.append("-v") ++ output, retval = runner.run(cmd) ++ if retval != 0: ++ raise LibraryError(reports.qdevice_get_status_error(__model, output)) ++ 
return output ++ ++def qdevice_status_cluster_text(runner, cluster=None, verbose=False): ++ """ ++ get qdevice runtime status in plain text ++ bool verbose get more detailed output ++ string cluster show information only about specified cluster ++ """ ++ cmd = [__qnetd_tool, "-l"] ++ if verbose: ++ cmd.append("-v") ++ if cluster: ++ cmd.extend(["-c", cluster]) ++ output, retval = runner.run(cmd) ++ if retval != 0: ++ raise LibraryError(reports.qdevice_get_status_error(__model, output)) ++ return output ++ + def qdevice_enable(runner): + """ + make qdevice start automatically on boot on local host +@@ -72,3 +128,255 @@ def qdevice_kill(runner): + kill qdevice now on local host + """ + external.kill_services(runner, [__service_name]) ++ ++def qdevice_sign_certificate_request(runner, cert_request, cluster_name): ++ """ ++ sign client certificate request ++ cert_request certificate request data ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ if not qdevice_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ # save the certificate request, corosync tool only works with files ++ tmpfile = _store_to_tmpfile( ++ cert_request, ++ reports.qdevice_certificate_sign_error ++ ) ++ # sign the request ++ output, retval = runner.run([ ++ __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name ++ ]) ++ tmpfile.close() # temp file is deleted on close ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_certificate_sign_error(output.strip()) ++ ) ++ # get signed certificate, corosync tool only works with files ++ return _get_output_certificate( ++ output, ++ reports.qdevice_certificate_sign_error ++ ) ++ ++def client_setup(runner, ca_certificate): ++ """ ++ initialize qdevice client on local host ++ ca_certificate qnetd CA certificate ++ """ ++ client_destroy() ++ # save CA certificate, corosync tool only works with files ++ ca_file_path = os.path.join( ++ settings.corosync_qdevice_net_client_certs_dir, 
++ settings.corosync_qdevice_net_client_ca_file_name ++ ) ++ try: ++ if not os.path.exists(ca_file_path): ++ os.makedirs( ++ settings.corosync_qdevice_net_client_certs_dir, ++ mode=0o700 ++ ) ++ with open(ca_file_path, "wb") as ca_file: ++ ca_file.write(ca_certificate) ++ except EnvironmentError as e: ++ raise LibraryError( ++ reports.qdevice_initialization_error(__model, e.strerror) ++ ) ++ # initialize client's certificate storage ++ output, retval = runner.run([ ++ __qdevice_certutil, "-i", "-c", ca_file_path ++ ]) ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_initialization_error(__model, output.rstrip()) ++ ) ++ ++def client_initialized(): ++ """ ++ check if qdevice net client certificate database has been initialized ++ """ ++ return os.path.exists(os.path.join( ++ settings.corosync_qdevice_net_client_certs_dir, ++ "cert8.db" ++ )) ++ ++def client_destroy(): ++ """ ++ delete qdevice client config files on local host ++ """ ++ try: ++ if client_initialized(): ++ shutil.rmtree(settings.corosync_qdevice_net_client_certs_dir) ++ except EnvironmentError as e: ++ raise LibraryError( ++ reports.qdevice_destroy_error(__model, e.strerror) ++ ) ++ ++def client_generate_certificate_request(runner, cluster_name): ++ """ ++ create a certificate request which can be signed by qnetd server ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ if not client_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ output, retval = runner.run([ ++ __qdevice_certutil, "-r", "-n", cluster_name ++ ]) ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_initialization_error(__model, output.rstrip()) ++ ) ++ return _get_output_certificate( ++ output, ++ functools.partial(reports.qdevice_initialization_error, __model) ++ ) ++ ++def client_cert_request_to_pk12(runner, cert_request): ++ """ ++ transform signed certificate request to pk12 certificate which can be ++ imported to nodes ++ cert_request signed 
certificate request ++ """ ++ if not client_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ # save the signed certificate request, corosync tool only works with files ++ tmpfile = _store_to_tmpfile( ++ cert_request, ++ reports.qdevice_certificate_import_error ++ ) ++ # transform it ++ output, retval = runner.run([ ++ __qdevice_certutil, "-M", "-c", tmpfile.name ++ ]) ++ tmpfile.close() # temp file is deleted on close ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_certificate_import_error(output) ++ ) ++ # get resulting pk12, corosync tool only works with files ++ return _get_output_certificate( ++ output, ++ reports.qdevice_certificate_import_error ++ ) ++ ++def client_import_certificate_and_key(runner, pk12_certificate): ++ """ ++ import qdevice client certificate to the local node certificate storage ++ """ ++ if not client_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ # save the certificate, corosync tool only works with files ++ tmpfile = _store_to_tmpfile( ++ pk12_certificate, ++ reports.qdevice_certificate_import_error ++ ) ++ output, retval = runner.run([ ++ __qdevice_certutil, "-m", "-c", tmpfile.name ++ ]) ++ tmpfile.close() # temp file is deleted on close ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_certificate_import_error(output) ++ ) ++ ++def remote_qdevice_get_ca_certificate(node_communicator, host): ++ """ ++ connect to a qnetd host and get qnetd CA certificate ++ string host address of the qnetd host ++ """ ++ try: ++ return base64.b64decode( ++ node_communicator.call_host( ++ host, ++ "remote/qdevice_net_get_ca_certificate", ++ None ++ ) ++ ) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_response_format(host)) ++ ++def remote_client_setup(node_communicator, node, qnetd_ca_certificate): ++ """ ++ connect to a remote node and initialize qdevice there ++ NodeAddresses node target node ++ qnetd_ca_certificate qnetd CA 
certificate ++ """ ++ return node_communicator.call_node( ++ node, ++ "remote/qdevice_net_client_init_certificate_storage", ++ external.NodeCommunicator.format_data_dict([ ++ ("ca_certificate", base64.b64encode(qnetd_ca_certificate)), ++ ]) ++ ) ++ ++def remote_sign_certificate_request( ++ node_communicator, host, cert_request, cluster_name ++): ++ """ ++ connect to a qdevice host and sign node certificate there ++ string host address of the qnetd host ++ cert_request certificate request to be signed ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ try: ++ return base64.b64decode( ++ node_communicator.call_host( ++ host, ++ "remote/qdevice_net_sign_node_certificate", ++ external.NodeCommunicator.format_data_dict([ ++ ("certificate_request", base64.b64encode(cert_request)), ++ ("cluster_name", cluster_name), ++ ]) ++ ) ++ ) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_response_format(host)) ++ ++def remote_client_import_certificate_and_key(node_communicator, node, pk12): ++ """ ++ import pk12 certificate on a remote node ++ NodeAddresses node target node ++ pk12 certificate ++ """ ++ return node_communicator.call_node( ++ node, ++ "remote/qdevice_net_client_import_certificate", ++ external.NodeCommunicator.format_data_dict([ ++ ("certificate", base64.b64encode(pk12)), ++ ]) ++ ) ++ ++def remote_client_destroy(node_communicator, node): ++ """ ++ delete qdevice client config files on a remote node ++ NodeAddresses node target node ++ """ ++ return node_communicator.call_node( ++ node, ++ "remote/qdevice_net_client_destroy", ++ None ++ ) ++ ++def _store_to_tmpfile(data, report_func): ++ try: ++ tmpfile = tempfile.NamedTemporaryFile(mode="wb", suffix=".pcs") ++ tmpfile.write(data) ++ tmpfile.flush() ++ return tmpfile ++ except EnvironmentError as e: ++ raise LibraryError(report_func(e.strerror)) ++ ++def _get_output_certificate(cert_tool_output, report_func): ++ regexp = re.compile(r"^Certificate( 
request)? stored in (?P<path>.+)$") ++ filename = None ++ for line in cert_tool_output.splitlines(): ++ match = regexp.search(line) ++ if match: ++ filename = match.group("path") ++ if not filename: ++ raise LibraryError(report_func(cert_tool_output)) ++ try: ++ with open(filename, "rb") as cert_file: ++ return cert_file.read() ++ except EnvironmentError as e: ++ raise LibraryError(report_func( ++ "{path}: {error}".format(path=filename, error=e.strerror) ++ )) +diff --git a/pcs/lib/env.py b/pcs/lib/env.py +index 1151891..24e4252 100644 +--- a/pcs/lib/env.py ++++ b/pcs/lib/env.py +@@ -10,6 +10,7 @@ from lxml import etree + from pcs.lib import reports + from pcs.lib.external import ( + is_cman_cluster, ++ is_service_running, + CommandRunner, + NodeCommunicator, + ) +@@ -21,6 +22,7 @@ from pcs.lib.corosync.live import ( + from pcs.lib.nodes_task import ( + distribute_corosync_conf, + check_corosync_offline_on_nodes, ++ qdevice_reload_on_nodes, + ) + from pcs.lib.pacemaker import ( + get_cib, +@@ -152,11 +154,18 @@ class LibraryEnvironment(object): + corosync_conf_data, + skip_offline_nodes + ) +- if not corosync_conf_facade.need_stopped_cluster: ++ if is_service_running(self.cmd_runner(), "corosync"): + reload_corosync_config(self.cmd_runner()) + self.report_processor.process( + reports.corosync_config_reloaded() + ) ++ if corosync_conf_facade.need_qdevice_reload: ++ qdevice_reload_on_nodes( ++ self.node_communicator(), ++ self.report_processor, ++ node_list, ++ skip_offline_nodes ++ ) + else: + self._corosync_conf_data = corosync_conf_data + +diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py +index c0bd3d1..9cab5e9 100644 +--- a/pcs/lib/errors.py ++++ b/pcs/lib/errors.py +@@ -42,4 +42,8 @@ class ReportItem(object): + self.message = self.message_pattern.format(**self.info) + + def __repr__(self): +- return self.code+": "+str(self.info) ++ return "{severity} {code}: {info}".format( ++ severity=self.severity, ++ code=self.code, ++ info=self.info ++ ) +diff --git 
a/pcs/lib/external.py b/pcs/lib/external.py +index 34426f9..c773e5a 100644 +--- a/pcs/lib/external.py ++++ b/pcs/lib/external.py +@@ -49,7 +49,11 @@ except ImportError: + + from pcs.lib import reports + from pcs.lib.errors import LibraryError, ReportItemSeverity +-from pcs.common.tools import simple_cache ++from pcs.common import report_codes ++from pcs.common.tools import ( ++ simple_cache, ++ run_parallel as tools_run_parallel, ++) + from pcs import settings + + +@@ -521,7 +525,7 @@ class NodeCommunicator(object): + # text in response body with HTTP code 400 + # we need to be backward compatible with that + raise NodeCommandUnsuccessfulException( +- host, request, response_data ++ host, request, response_data.rstrip() + ) + elif e.code == 401: + raise NodeAuthenticationException( +@@ -581,3 +585,39 @@ class NodeCommunicator(object): + base64.b64encode(" ".join(self._groups).encode("utf-8")) + )) + return cookies ++ ++ ++def parallel_nodes_communication_helper( ++ func, func_args_kwargs, reporter, skip_offline_nodes=False ++): ++ """ ++ Help running node calls in parallel and handle communication exceptions. ++ Raise LibraryError on any failure. 
++ ++ function func function to be run, should be a function calling a node ++ iterable func_args_kwargs list of tuples: (*args, **kwargs) ++ bool skip_offline_nodes do not raise LibraryError if a node is unreachable ++ """ ++ failure_severity = ReportItemSeverity.ERROR ++ failure_forceable = report_codes.SKIP_OFFLINE_NODES ++ if skip_offline_nodes: ++ failure_severity = ReportItemSeverity.WARNING ++ failure_forceable = None ++ report_items = [] ++ ++ def _parallel(*args, **kwargs): ++ try: ++ func(*args, **kwargs) ++ except NodeCommunicationException as e: ++ report_items.append( ++ node_communicator_exception_to_report_item( ++ e, ++ failure_severity, ++ failure_forceable ++ ) ++ ) ++ except LibraryError as e: ++ report_items.extend(e.args) ++ ++ tools_run_parallel(_parallel, func_args_kwargs) ++ reporter.process_list(report_items) +diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py +index b9a61f6..e94d327 100644 +--- a/pcs/lib/nodes_task.py ++++ b/pcs/lib/nodes_task.py +@@ -8,14 +8,19 @@ from __future__ import ( + import json + + from pcs.common import report_codes ++from pcs.common.tools import run_parallel as tools_run_parallel + from pcs.lib import reports +-from pcs.lib.errors import ReportItemSeverity ++from pcs.lib.errors import LibraryError, ReportItemSeverity + from pcs.lib.external import ( + NodeCommunicator, + NodeCommunicationException, + node_communicator_exception_to_report_item, ++ parallel_nodes_communication_helper, ++) ++from pcs.lib.corosync import ( ++ live as corosync_live, ++ qdevice_client, + ) +-from pcs.lib.corosync import live as corosync_live + + + def distribute_corosync_conf( +@@ -33,11 +38,9 @@ def distribute_corosync_conf( + if skip_offline_nodes: + failure_severity = ReportItemSeverity.WARNING + failure_forceable = None +- +- reporter.process(reports.corosync_config_distribution_started()) + report_items = [] +- # TODO use parallel communication +- for node in node_addr_list: ++ ++ def _parallel(node): + try: + 
corosync_live.set_remote_corosync_conf( + node_communicator, +@@ -62,6 +65,12 @@ def distribute_corosync_conf( + failure_forceable + ) + ) ++ ++ reporter.process(reports.corosync_config_distribution_started()) ++ tools_run_parallel( ++ _parallel, ++ [((node, ), {}) for node in node_addr_list] ++ ) + reporter.process_list(report_items) + + def check_corosync_offline_on_nodes( +@@ -77,13 +86,11 @@ def check_corosync_offline_on_nodes( + if skip_offline_nodes: + failure_severity = ReportItemSeverity.WARNING + failure_forceable = None +- +- reporter.process(reports.corosync_not_running_check_started()) + report_items = [] +- # TODO use parallel communication +- for node in node_addr_list: ++ ++ def _parallel(node): + try: +- status = node_communicator.call_node(node, "remote/status", "") ++ status = node_communicator.call_node(node, "remote/status", None) + if not json.loads(status)["corosync"]: + reporter.process( + reports.corosync_not_running_on_node_ok(node.label) +@@ -115,8 +122,48 @@ def check_corosync_offline_on_nodes( + failure_forceable + ) + ) ++ ++ reporter.process(reports.corosync_not_running_check_started()) ++ tools_run_parallel( ++ _parallel, ++ [((node, ), {}) for node in node_addr_list] ++ ) + reporter.process_list(report_items) + ++def qdevice_reload_on_nodes( ++ node_communicator, reporter, node_addr_list, skip_offline_nodes=False ++): ++ """ ++ Reload corosync-qdevice configuration on cluster nodes ++ NodeAddressesList node_addr_list nodes to reload config on ++ bool skip_offline_nodes don't raise an error on node communication errors ++ """ ++ reporter.process(reports.qdevice_client_reload_started()) ++ parallel_params = [ ++ [(reporter, node_communicator, node), {}] ++ for node in node_addr_list ++ ] ++ # catch an exception so we try to start qdevice on nodes where we stopped it ++ report_items = [] ++ try: ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_stop, ++ parallel_params, ++ reporter, ++ skip_offline_nodes ++ ) ++ 
except LibraryError as e: ++ report_items.extend(e.args) ++ try: ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_start, ++ parallel_params, ++ reporter, ++ skip_offline_nodes ++ ) ++ except LibraryError as e: ++ report_items.extend(e.args) ++ reporter.process_list(report_items) + + def node_check_auth(communicator, node): + """ +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index 490b4ff..d8f88cd 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -552,6 +552,19 @@ def corosync_running_on_node_fail(node): + info={"node": node} + ) + ++def corosync_quorum_get_status_error(reason): ++ """ ++ unable to get runtime status of quorum on local node ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR, ++ "Unable to get quorum status: {reason}", ++ info={ ++ "reason": reason, ++ } ++ ) ++ + def corosync_config_reloaded(): + """ + corosync configuration has been reloaded +@@ -614,6 +627,21 @@ def corosync_config_parser_other_error(): + "Unable to parse corosync config" + ) + ++def corosync_options_incompatible_with_qdevice(options): ++ """ ++ cannot set specified corosync options when qdevice is in use ++ iterable options incompatible options names ++ """ ++ return ReportItem.error( ++ report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE, ++ "These options cannot be set when the cluster uses a quorum device: " ++ + "{options_names_str}", ++ info={ ++ "options_names": options, ++ "options_names_str": ", ".join(sorted(options)), ++ } ++ ) ++ + def qdevice_already_defined(): + """ + qdevice is already set up in a cluster, when it was expected not to be +@@ -641,6 +669,15 @@ def qdevice_remove_or_cluster_stop_needed(): + "You need to stop the cluster or remove qdevice from cluster to continue" + ) + ++def qdevice_client_reload_started(): ++ """ ++ qdevice client configuration is about to be reloaded on nodes ++ """ ++ return ReportItem.info( ++ 
report_codes.QDEVICE_CLIENT_RELOAD_STARTED, ++ "Reloading qdevice configuration on nodes..." ++ ) ++ + def qdevice_already_initialized(model): + """ + cannot create qdevice on local host, it has been already created +@@ -654,6 +691,19 @@ def qdevice_already_initialized(model): + } + ) + ++def qdevice_not_initialized(model): ++ """ ++ cannot work with qdevice on local host, it has not been created yet ++ string model qdevice model ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_NOT_INITIALIZED, ++ "Quorum device '{model}' has not been initialized yet", ++ info={ ++ "model": model, ++ } ++ ) ++ + def qdevice_initialization_success(model): + """ + qdevice was successfully initialized on local host +@@ -682,6 +732,72 @@ def qdevice_initialization_error(model, reason): + } + ) + ++def qdevice_certificate_distribution_started(): ++ """ ++ Qdevice certificates are about to be set up on nodes ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ "Setting up qdevice certificates on nodes..." ++ ) ++ ++def qdevice_certificate_accepted_by_node(node): ++ """ ++ Qdevice certificates have been saved to a node ++ string node node on which certificates have been saved ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ "{node}: Succeeded", ++ info={"node": node} ++ ) ++ ++def qdevice_certificate_removal_started(): ++ """ ++ Qdevice certificates are about to be removed from nodes ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED, ++ "Removing qdevice certificates from nodes..." 
++ ) ++ ++def qdevice_certificate_removed_from_node(node): ++ """ ++ Qdevice certificates have been removed from a node ++ string node node on which certificates have been deleted ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE, ++ "{node}: Succeeded", ++ info={"node": node} ++ ) ++ ++def qdevice_certificate_import_error(reason): ++ """ ++ an error occured when importing qdevice certificate to a node ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, ++ "Unable to import quorum device certificate: {reason}", ++ info={ ++ "reason": reason, ++ } ++ ) ++ ++def qdevice_certificate_sign_error(reason): ++ """ ++ an error occured when signing qdevice certificate ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR, ++ "Unable to sign quorum device certificate: {reason}", ++ info={ ++ "reason": reason, ++ } ++ ) ++ + def qdevice_destroy_success(model): + """ + qdevice configuration successfully removed from local host +@@ -710,6 +826,21 @@ def qdevice_destroy_error(model, reason): + } + ) + ++def qdevice_get_status_error(model, reason): ++ """ ++ unable to get runtime status of qdevice ++ string model qdevice model ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_GET_STATUS_ERROR, ++ "Unable to get status of quorum device '{model}': {reason}", ++ info={ ++ "model": model, ++ "reason": reason, ++ } ++ ) ++ + def cman_unsupported_command(): + """ + requested library command is not available as local cluster is CMAN based +@@ -1022,31 +1153,55 @@ def service_start_started(service): + } + ) + +-def service_start_error(service, reason): ++def service_start_error(service, reason, node=None): + """ + system service start failed + string service service name or description + string reason error message ++ string node node on which service has been requested to start + 
""" ++ msg = "Unable to start {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_START_ERROR, +- "Unable to start {service}: {reason}", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, ++ "node": node, + } + ) + +-def service_start_success(service): ++def service_start_success(service, node=None): + """ + system service was started successfully + string service service name or description ++ string node node on which service has been requested to start + """ ++ msg = "{service} started" + return ReportItem.info( + report_codes.SERVICE_START_SUCCESS, +- "{service} started", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, ++ "node": node, ++ } ++ ) ++ ++def service_start_skipped(service, reason, node=None): ++ """ ++ starting system service was skipped, no error occured ++ string service service name or description ++ string reason why the start has been skipped ++ string node node on which service has been requested to start ++ """ ++ msg = "not starting {service} - {reason}" ++ return ReportItem.info( ++ report_codes.SERVICE_START_SKIPPED, ++ msg if node is None else "{node}: " + msg, ++ info={ ++ "service": service, ++ "reason": reason, ++ "node": node, + } + ) + +@@ -1063,31 +1218,37 @@ def service_stop_started(service): + } + ) + +-def service_stop_error(service, reason): ++def service_stop_error(service, reason, node=None): + """ + system service stop failed + string service service name or description + string reason error message ++ string node node on which service has been requested to stop + """ ++ msg = "Unable to stop {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_STOP_ERROR, +- "Unable to stop {service}: {reason}", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, ++ "node": node, + } + ) + +-def service_stop_success(service): ++def service_stop_success(service, node=None): + """ + system 
service was stopped successfully + string service service name or description ++ string node node on which service has been requested to stop + """ ++ msg = "{service} stopped" + return ReportItem.info( + report_codes.SERVICE_STOP_SUCCESS, +- "{service} stopped", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, ++ "node": node, + } + ) + +@@ -1121,6 +1282,19 @@ def service_kill_success(services): + } + ) + ++def service_enable_started(service): ++ """ ++ system service is being enabled ++ string service service name or description ++ """ ++ return ReportItem.info( ++ report_codes.SERVICE_ENABLE_STARTED, ++ "Enabling {service}...", ++ info={ ++ "service": service, ++ } ++ ) ++ + def service_enable_error(service, reason, node=None): + """ + system service enable failed +@@ -1143,7 +1317,7 @@ def service_enable_success(service, node=None): + """ + system service was enabled successfully + string service service name or description +- string node node on which service was enabled ++ string node node on which service has been enabled + """ + msg = "{service} enabled" + return ReportItem.info( +@@ -1155,6 +1329,37 @@ def service_enable_success(service, node=None): + } + ) + ++def service_enable_skipped(service, reason, node=None): ++ """ ++ enabling system service was skipped, no error occured ++ string service service name or description ++ string reason why the enabling has been skipped ++ string node node on which service has been requested to enable ++ """ ++ msg = "not enabling {service} - {reason}" ++ return ReportItem.info( ++ report_codes.SERVICE_ENABLE_SKIPPED, ++ msg if node is None else "{node}: " + msg, ++ info={ ++ "service": service, ++ "reason": reason, ++ "node": node, ++ } ++ ) ++ ++def service_disable_started(service): ++ """ ++ system service is being disabled ++ string service service name or description ++ """ ++ return ReportItem.info( ++ report_codes.SERVICE_DISABLE_STARTED, ++ "Disabling {service}...", ++ info={ ++ 
"service": service, ++ } ++ ) ++ + def service_disable_error(service, reason, node=None): + """ + system service disable failed +@@ -1189,7 +1394,6 @@ def service_disable_success(service, node=None): + } + ) + +- + def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None): + """ + Invalid format of metadata +@@ -1201,7 +1405,6 @@ def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None): + forceable=forceable + ) + +- + def unable_to_get_agent_metadata( + agent, reason, severity=ReportItemSeverity.ERROR, forceable=None + ): +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 425b613..a72a9bd 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -518,8 +518,11 @@ rule remove <rule id> + Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed. + .SS "qdevice" + .TP ++status <device model> [\fB\-\-full\fR] [<cluster name>] ++Show runtime status of specified model of quorum device provider. Using \fB\-\-full\fR will give more detailed output. If <cluster name> is specified, only information about the specified cluster will be displayed. ++.TP + setup model <device model> [\fB\-\-enable\fR] [\fB\-\-start\fR] +-Configure specified model of quorum device provider. Quorum device then may be added to clusters by "pcs quorum device add" command. \fB\-\-start\fR will also start the provider. \fB\-\-enable\fR will configure the provider to start on boot. ++Configure specified model of quorum device provider. Quorum device then can be added to clusters by running "pcs quorum device add" command in a cluster. \fB\-\-start\fR will also start the provider. \fB\-\-enable\fR will configure the provider to start on boot. + .TP + destroy <device model> + Disable and stop specified model of quorum device provider and delete its configuration files. +@@ -531,7 +534,7 @@ stop <device model> + Stop specified model of quorum device provider. 
+ .TP + kill <device model> +-Force specified model of quorum device provider to stop (performs kill -9). ++Force specified model of quorum device provider to stop (performs kill \-9). Note that init system (e.g. systemd) can detect that the qdevice is not running and start it again. If you want to stop the qdevice, run "pcs qdevice stop" command. + .TP + enable <device model> + Configure specified model of quorum device provider to start on boot. +@@ -543,14 +546,22 @@ Configure specified model of quorum device provider to not start on boot. + config + Show quorum configuration. + .TP +-device add [generic options] model <device model> [model options] +-Add quorum device to cluster. Quorum device needs to be created first by "pcs qdevice setup" command. ++status ++Show quorum runtime status. ++.TP ++device add [<generic options>] model <device model> [<model options>] ++Add a quorum device to the cluster. Quorum device needs to be created first by "pcs qdevice setup" command. It is not possible to use more than one quorum device in a cluster simultaneously. Generic options, model and model options are all documented in corosync's corosync\-qdevice(8) man page. + .TP + device remove +-Remove quorum device from cluster. ++Remove a quorum device from the cluster. + .TP +-device update [generic options] [model <model options>] +-Add/Change quorum device options. Requires cluster to be stopped. ++device status [\fB\-\-full\fR] ++Show quorum device runtime status. Using \fB\-\-full\fR will give more detailed output. ++.TP ++device update [<generic options>] [model <model options>] ++Add/Change quorum device options. Generic options and model options are all documented in corosync's corosync\-qdevice(8) man page. Requires the cluster to be stopped. ++ ++WARNING: If you want to change "host" option of qdevice model net, use "pcs quorum device remove" and "pcs quorum device add" commands to set up configuration properly unless old and new host is the same machine. 
+ .TP + unblock [\fB\-\-force\fR] + Cancel waiting for all nodes when establishing quorum. Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless. This command should ONLY be used when nodes which the cluster is waiting for have been confirmed to be powered off and to have no access to shared resources. +@@ -558,7 +569,7 @@ Cancel waiting for all nodes when establishing quorum. Useful in situations whe + .B WARNING: If the nodes are not actually powered off or they do have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed. + .TP + update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]] +-Add/Change quorum options. At least one option must be specified. Options are documented in corosync's votequorum(5) man page. Requires cluster to be stopped. ++Add/Change quorum options. At least one option must be specified. Options are documented in corosync's votequorum(5) man page. Requires the cluster to be stopped. 
+ .SS "status" + .TP + [status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR] +diff --git a/pcs/qdevice.py b/pcs/qdevice.py +index 1f06709..0037704 100644 +--- a/pcs/qdevice.py ++++ b/pcs/qdevice.py +@@ -23,6 +23,8 @@ def qdevice_cmd(lib, argv, modifiers): + try: + if sub_cmd == "help": + usage.qdevice(argv) ++ elif sub_cmd == "status": ++ qdevice_status_cmd(lib, argv_next, modifiers) + elif sub_cmd == "setup": + qdevice_setup_cmd(lib, argv_next, modifiers) + elif sub_cmd == "destroy": +@@ -37,6 +39,11 @@ def qdevice_cmd(lib, argv, modifiers): + qdevice_enable_cmd(lib, argv_next, modifiers) + elif sub_cmd == "disable": + qdevice_disable_cmd(lib, argv_next, modifiers) ++ # following commands are internal use only, called from pcsd ++ elif sub_cmd == "sign-net-cert-request": ++ qdevice_sign_net_cert_request_cmd(lib, argv_next, modifiers) ++ elif sub_cmd == "net-client": ++ qdevice_net_client_cmd(lib, argv_next, modifiers) + else: + raise CmdLineInputError() + except LibraryError as e: +@@ -44,6 +51,35 @@ def qdevice_cmd(lib, argv, modifiers): + except CmdLineInputError as e: + utils.exit_on_cmdline_input_errror(e, "qdevice", sub_cmd) + ++# this is internal use only, called from pcsd ++def qdevice_net_client_cmd(lib, argv, modifiers): ++ if len(argv) < 1: ++ utils.err("invalid command") ++ ++ sub_cmd, argv_next = argv[0], argv[1:] ++ try: ++ if sub_cmd == "setup": ++ qdevice_net_client_setup_cmd(lib, argv_next, modifiers) ++ elif sub_cmd == "import-certificate": ++ qdevice_net_client_import_certificate_cmd(lib, argv_next, modifiers) ++ elif sub_cmd == "destroy": ++ qdevice_net_client_destroy(lib, argv_next, modifiers) ++ else: ++ raise CmdLineInputError("invalid command") ++ except LibraryError as e: ++ utils.process_library_reports(e.args) ++ except CmdLineInputError as e: ++ utils.err(e.message) ++ ++def qdevice_status_cmd(lib, argv, modifiers): ++ if len(argv) < 1 or len(argv) > 2: ++ raise CmdLineInputError() ++ model = argv[0] ++ cluster = None if len(argv) < 2 else 
argv[1] ++ print( ++ lib.qdevice.status(model, modifiers["full"], cluster) ++ ) ++ + def qdevice_setup_cmd(lib, argv, modifiers): + if len(argv) != 2: + raise CmdLineInputError() +@@ -87,3 +123,38 @@ def qdevice_disable_cmd(lib, argv, modifiers): + raise CmdLineInputError() + model = argv[0] + lib.qdevice.disable(model) ++ ++# following commands are internal use only, called from pcsd ++ ++def qdevice_net_client_setup_cmd(lib, argv, modifiers): ++ ca_certificate = _read_stdin() ++ lib.qdevice.client_net_setup(ca_certificate) ++ ++def qdevice_net_client_import_certificate_cmd(lib, argv, modifiers): ++ certificate = _read_stdin() ++ lib.qdevice.client_net_import_certificate(certificate) ++ ++def qdevice_net_client_destroy(lib, argv, modifiers): ++ lib.qdevice.client_net_destroy() ++ ++def qdevice_sign_net_cert_request_cmd(lib, argv, modifiers): ++ certificate_request = _read_stdin() ++ signed = lib.qdevice.sign_net_cert_request( ++ certificate_request, ++ modifiers["name"] ++ ) ++ if sys.version_info.major > 2: ++ # In python3 base64.b64encode returns bytes. ++ # In python2 base64.b64encode returns string. ++ # Bytes is printed like this: b'bytes content' ++ # and we need to get rid of that b'', so we change bytes to string. ++ # Since it's base64encoded, it's safe to use ascii. 
++ signed = signed.decode("ascii") ++ print(signed) ++ ++def _read_stdin(): ++ # in python3 stdin returns str so we need to use buffer ++ if hasattr(sys.stdin, "buffer"): ++ return sys.stdin.buffer.read() ++ else: ++ return sys.stdin.read() +diff --git a/pcs/quorum.py b/pcs/quorum.py +index f793a21..27085ac 100644 +--- a/pcs/quorum.py ++++ b/pcs/quorum.py +@@ -28,6 +28,8 @@ def quorum_cmd(lib, argv, modificators): + usage.quorum(argv) + elif sub_cmd == "config": + quorum_config_cmd(lib, argv_next, modificators) ++ elif sub_cmd == "status": ++ quorum_status_cmd(lib, argv_next, modificators) + elif sub_cmd == "device": + quorum_device_cmd(lib, argv_next, modificators) + elif sub_cmd == "unblock": +@@ -51,6 +53,8 @@ def quorum_device_cmd(lib, argv, modificators): + quorum_device_add_cmd(lib, argv_next, modificators) + elif sub_cmd == "remove": + quorum_device_remove_cmd(lib, argv_next, modificators) ++ elif sub_cmd == "status": ++ quorum_device_status_cmd(lib, argv_next, modificators) + elif sub_cmd == "update": + quorum_device_update_cmd(lib, argv_next, modificators) + else: +@@ -97,6 +101,21 @@ def quorum_config_to_str(config): + + return lines + ++def quorum_status_cmd(lib, argv, modificators): ++ if argv: ++ raise CmdLineInputError() ++ print(lib.quorum.status()) ++ ++def quorum_update_cmd(lib, argv, modificators): ++ options = parse_args.prepare_options(argv) ++ if not options: ++ raise CmdLineInputError() ++ ++ lib.quorum.set_options( ++ options, ++ skip_offline_nodes=modificators["skip_offline_nodes"] ++ ) ++ + def quorum_device_add_cmd(lib, argv, modificators): + # we expect "model" keyword once, followed by the actual model value + options_lists = parse_args.split_list(argv, "model") +@@ -131,6 +150,11 @@ def quorum_device_remove_cmd(lib, argv, modificators): + skip_offline_nodes=modificators["skip_offline_nodes"] + ) + ++def quorum_device_status_cmd(lib, argv, modificators): ++ if argv: ++ raise CmdLineInputError() ++ 
print(lib.quorum.status_device(modificators["full"])) ++ + def quorum_device_update_cmd(lib, argv, modificators): + # we expect "model" keyword once + options_lists = parse_args.split_list(argv, "model") +@@ -154,13 +178,3 @@ def quorum_device_update_cmd(lib, argv, modificators): + force_options=modificators["force"], + skip_offline_nodes=modificators["skip_offline_nodes"] + ) +- +-def quorum_update_cmd(lib, argv, modificators): +- options = parse_args.prepare_options(argv) +- if not options: +- raise CmdLineInputError() +- +- lib.quorum.set_options( +- options, +- skip_offline_nodes=modificators["skip_offline_nodes"] +- ) +diff --git a/pcs/settings_default.py b/pcs/settings_default.py +index 3acd8e0..9d44918 100644 +--- a/pcs/settings_default.py ++++ b/pcs/settings_default.py +@@ -2,18 +2,20 @@ import os.path + + pacemaker_binaries = "/usr/sbin/" + corosync_binaries = "/usr/sbin/" ++corosync_qnet_binaries = "/usr/bin/" + ccs_binaries = "/usr/sbin/" + corosync_conf_dir = "/etc/corosync/" + corosync_conf_file = os.path.join(corosync_conf_dir, "corosync.conf") + corosync_uidgid_dir = os.path.join(corosync_conf_dir, "uidgid.d/") + corosync_qdevice_net_server_certs_dir = os.path.join( + corosync_conf_dir, +- "qdevice/net/qnetd/nssdb" ++ "qnetd/nssdb" + ) + corosync_qdevice_net_client_certs_dir = os.path.join( + corosync_conf_dir, +- "qdevice/net/node/nssdb" ++ "qdevice/net/nssdb" + ) ++corosync_qdevice_net_client_ca_file_name = "qnetd-cacert.crt" + cluster_conf_file = "/etc/cluster/cluster.conf" + fence_agent_binaries = "/usr/sbin/" + pengine_binary = "/usr/libexec/pacemaker/pengine" +diff --git a/pcs/test/resources/qdevice-certs/qnetd-cacert.crt b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt +new file mode 100644 +index 0000000..34dcab0 +--- /dev/null ++++ b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt +@@ -0,0 +1 @@ ++certificate data +\ No newline at end of file +diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py 
+index 3900c1d..ff588d5 100644 +--- a/pcs/test/test_lib_commands_qdevice.py ++++ b/pcs/test/test_lib_commands_qdevice.py +@@ -6,6 +6,7 @@ from __future__ import ( + ) + + from unittest import TestCase ++import base64 + import logging + + from pcs.test.tools.pcs_mock import mock +@@ -58,6 +59,11 @@ class QdeviceDisabledOnCmanTest(QdeviceTestCase): + lambda: lib.qdevice_destroy(self.lib_env, "bad model") + ) + ++ def test_status_text(self): ++ self.base_test( ++ lambda: lib.qdevice_status_text(self.lib_env, "bad model") ++ ) ++ + def test_enable(self): + self.base_test( + lambda: lib.qdevice_enable(self.lib_env, "bad model") +@@ -83,6 +89,30 @@ class QdeviceDisabledOnCmanTest(QdeviceTestCase): + lambda: lib.qdevice_kill(self.lib_env, "bad model") + ) + ++ def test_qdevice_net_sign_certificate_request(self): ++ self.base_test( ++ lambda: lib.qdevice_net_sign_certificate_request( ++ self.lib_env, ++ "certificate request", ++ "cluster name" ++ ) ++ ) ++ ++ def test_client_net_setup(self): ++ self.base_test( ++ lambda: lib.client_net_setup(self.lib_env, "ca certificate") ++ ) ++ ++ def test_client_net_import_certificate(self): ++ self.base_test( ++ lambda: lib.client_net_import_certificate(self.lib_env, "cert") ++ ) ++ ++ def test_client_net_destroy(self): ++ self.base_test( ++ lambda: lib.client_net_destroy(self.lib_env) ++ ) ++ + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) + class QdeviceBadModelTest(QdeviceTestCase): +@@ -110,6 +140,11 @@ class QdeviceBadModelTest(QdeviceTestCase): + lambda: lib.qdevice_destroy(self.lib_env, "bad model") + ) + ++ def test_status_text(self): ++ self.base_test( ++ lambda: lib.qdevice_status_text(self.lib_env, "bad model") ++ ) ++ + def test_enable(self): + self.base_test( + lambda: lib.qdevice_enable(self.lib_env, "bad model") +@@ -489,6 +524,80 @@ class QdeviceNetDestroyTest(QdeviceTestCase): + ) + + ++@mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_status_cluster_text") 
++@mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_status_generic_text") ++@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++class TestQdeviceNetStatusTextTest(QdeviceTestCase): ++ def test_success(self, mock_status_generic, mock_status_cluster): ++ mock_status_generic.return_value = "generic status info\n" ++ mock_status_cluster.return_value = "cluster status info\n" ++ ++ self.assertEquals( ++ lib.qdevice_status_text(self.lib_env, "net"), ++ "generic status info\ncluster status info\n" ++ ) ++ ++ mock_status_generic.assert_called_once_with("mock_runner", False) ++ mock_status_cluster.assert_called_once_with("mock_runner", None, False) ++ ++ def test_success_verbose(self, mock_status_generic, mock_status_cluster): ++ mock_status_generic.return_value = "generic status info\n" ++ mock_status_cluster.return_value = "cluster status info\n" ++ ++ self.assertEquals( ++ lib.qdevice_status_text(self.lib_env, "net", verbose=True), ++ "generic status info\ncluster status info\n" ++ ) ++ ++ mock_status_generic.assert_called_once_with("mock_runner", True) ++ mock_status_cluster.assert_called_once_with("mock_runner", None, True) ++ ++ def test_success_cluster(self, mock_status_generic, mock_status_cluster): ++ mock_status_generic.return_value = "generic status info\n" ++ mock_status_cluster.return_value = "cluster status info\n" ++ ++ self.assertEquals( ++ lib.qdevice_status_text(self.lib_env, "net", cluster="name"), ++ "generic status info\ncluster status info\n" ++ ) ++ ++ mock_status_generic.assert_called_once_with("mock_runner", False) ++ mock_status_cluster.assert_called_once_with("mock_runner", "name", False) ++ ++ def test_error_generic_status( ++ self, mock_status_generic, mock_status_cluster ++ ): ++ mock_status_generic.side_effect = LibraryError("mock_report_item") ++ mock_status_cluster.return_value = "cluster status info\n" ++ ++ 
self.assertRaises( ++ LibraryError, ++ lambda: lib.qdevice_status_text(self.lib_env, "net") ++ ) ++ ++ mock_status_generic.assert_called_once_with("mock_runner", False) ++ mock_status_cluster.assert_not_called() ++ ++ def test_error_cluster_status( ++ self, mock_status_generic, mock_status_cluster ++ ): ++ mock_status_generic.return_value = "generic status info\n" ++ mock_status_cluster.side_effect = LibraryError("mock_report_item") ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib.qdevice_status_text(self.lib_env, "net") ++ ) ++ ++ mock_status_generic.assert_called_once_with("mock_runner", False) ++ mock_status_cluster.assert_called_once_with("mock_runner", None, False) ++ ++ + @mock.patch("pcs.lib.external.enable_service") + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) + @mock.patch.object( +@@ -757,3 +866,149 @@ class QdeviceNetKillTest(QdeviceTestCase): + "mock_runner", + ["corosync-qnetd"] + ) ++ ++ ++@mock.patch( ++ "pcs.lib.commands.qdevice.qdevice_net.qdevice_sign_certificate_request" ++) ++@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++class QdeviceNetSignCertificateRequestTest(QdeviceTestCase): ++ def test_success(self, mock_qdevice_func): ++ qdevice_func_input = "certificate request".encode("utf-8") ++ qdevice_func_output = "signed certificate".encode("utf-8") ++ mock_qdevice_func.return_value = qdevice_func_output ++ cluster_name = "clusterName" ++ ++ self.assertEqual( ++ base64.b64encode(qdevice_func_output), ++ lib.qdevice_net_sign_certificate_request( ++ self.lib_env, ++ base64.b64encode(qdevice_func_input), ++ cluster_name ++ ) ++ ) ++ ++ mock_qdevice_func.assert_called_once_with( ++ "mock_runner", ++ qdevice_func_input, ++ cluster_name ++ ) ++ ++ def test_bad_input(self, mock_qdevice_func): ++ qdevice_func_input = "certificate request".encode("utf-8") ++ cluster_name = "clusterName" ++ ++ 
assert_raise_library_error( ++ lambda: lib.qdevice_net_sign_certificate_request( ++ self.lib_env, ++ qdevice_func_input, ++ cluster_name ++ ), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "qnetd certificate request", ++ "option_value": qdevice_func_input, ++ "allowed_values": ["base64 encoded certificate"], ++ } ++ ) ++ ) ++ ++ mock_qdevice_func.assert_not_called() ++ ++ ++@mock.patch("pcs.lib.commands.qdevice.qdevice_net.client_setup") ++@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++class ClientNetSetupTest(QdeviceTestCase): ++ def test_success(self, mock_qdevice_func): ++ qdevice_func_input = "CA certificate".encode("utf-8") ++ ++ lib.client_net_setup(self.lib_env, base64.b64encode(qdevice_func_input)) ++ ++ mock_qdevice_func.assert_called_once_with( ++ "mock_runner", ++ qdevice_func_input ++ ) ++ ++ def test_bad_input(self, mock_qdevice_func): ++ qdevice_func_input = "CA certificate".encode("utf-8") ++ ++ assert_raise_library_error( ++ lambda: lib.client_net_setup(self.lib_env, qdevice_func_input), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "qnetd CA certificate", ++ "option_value": qdevice_func_input, ++ "allowed_values": ["base64 encoded certificate"], ++ } ++ ) ++ ) ++ ++ mock_qdevice_func.assert_not_called() ++ ++ ++@mock.patch( ++ "pcs.lib.commands.qdevice.qdevice_net.client_import_certificate_and_key" ++) ++@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++class ClientNetImportCertificateTest(QdeviceTestCase): ++ def test_success(self, mock_qdevice_func): ++ qdevice_func_input = "client certificate".encode("utf-8") ++ ++ lib.client_net_import_certificate( ++ self.lib_env, ++ base64.b64encode(qdevice_func_input) ++ ) ++ ++ 
mock_qdevice_func.assert_called_once_with( ++ "mock_runner", ++ qdevice_func_input ++ ) ++ ++ def test_bad_input(self, mock_qdevice_func): ++ qdevice_func_input = "client certificate".encode("utf-8") ++ ++ assert_raise_library_error( ++ lambda: lib.client_net_import_certificate( ++ self.lib_env, ++ qdevice_func_input ++ ), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "qnetd client certificate", ++ "option_value": qdevice_func_input, ++ "allowed_values": ["base64 encoded certificate"], ++ } ++ ) ++ ) ++ ++ mock_qdevice_func.assert_not_called() ++ ++ ++@mock.patch("pcs.lib.commands.qdevice.qdevice_net.client_destroy") ++@mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++class ClientNetDestroyTest(QdeviceTestCase): ++ def test_success(self, mock_qdevice_func): ++ lib.client_net_destroy(self.lib_env) ++ mock_qdevice_func.assert_called_once_with() ++ +diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py +index 5725381..e824f37 100644 +--- a/pcs/test/test_lib_commands_quorum.py ++++ b/pcs/test/test_lib_commands_quorum.py +@@ -21,7 +21,12 @@ from pcs.test.tools.pcs_mock import mock + + from pcs.common import report_codes + from pcs.lib.env import LibraryEnvironment +-from pcs.lib.errors import ReportItemSeverity as severity ++from pcs.lib.errors import ( ++ LibraryError, ++ ReportItemSeverity as severity, ++) ++from pcs.lib.external import NodeCommunicationException ++from pcs.lib.node import NodeAddresses, NodeAddressesList + + from pcs.lib.commands import quorum as lib + +@@ -243,25 +248,102 @@ class SetQuorumOptionsTest(TestCase, CmanMixin): + mock_push_corosync.assert_not_called() + + ++@mock.patch("pcs.lib.commands.quorum.corosync_live.get_quorum_status_text") ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++class StatusTextTest(TestCase, CmanMixin): ++ def setUp(self): ++ self.mock_logger = mock.MagicMock(logging.Logger) ++ 
self.mock_reporter = MockLibraryReportProcessor() ++ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) ++ def test_disabled_on_cman(self, mock_status): ++ self.assert_disabled_on_cman( ++ lambda: lib.status_text(self.lib_env) ++ ) ++ mock_status.assert_not_called() ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def test_success(self, mock_status): ++ mock_status.return_value = "status text" ++ self.assertEqual( ++ lib.status_text(self.lib_env), ++ "status text" ++ ) ++ mock_status.assert_called_once_with("mock_runner") ++ ++ ++@mock.patch("pcs.lib.commands.quorum.qdevice_client.get_status_text") ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++class StatusDeviceTextTest(TestCase, CmanMixin): ++ def setUp(self): ++ self.mock_logger = mock.MagicMock(logging.Logger) ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) ++ def test_disabled_on_cman(self, mock_status): ++ self.assert_disabled_on_cman( ++ lambda: lib.status_device_text(self.lib_env) ++ ) ++ mock_status.assert_not_called() ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def test_success(self, mock_status): ++ mock_status.return_value = "status text" ++ self.assertEqual( ++ lib.status_device_text(self.lib_env), ++ "status text" ++ ) ++ mock_status.assert_called_once_with("mock_runner", False) ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def test_success_verbose(self, mock_status): ++ mock_status.return_value = "status text" ++ self.assertEqual( ++ lib.status_device_text(self.lib_env, True), ++ "status text" ++ ) ++ mock_status.assert_called_once_with("mock_runner", True) ++ ++ + @mock.patch.object(LibraryEnvironment, "push_corosync_conf") + 
@mock.patch.object(LibraryEnvironment, "get_corosync_conf_data") ++@mock.patch("pcs.lib.commands.quorum._add_device_model_net") ++@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_enable") ++@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_start") + class AddDeviceTest(TestCase, CmanMixin): + def setUp(self): + self.mock_logger = mock.MagicMock(logging.Logger) + self.mock_reporter = MockLibraryReportProcessor() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) +- def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync): ++ def test_disabled_on_cman( ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, mock_push_corosync ++ ): + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) + self.assert_disabled_on_cman( + lambda: lib.add_device(lib_env, "net", {"host": "127.0.0.1"}, {}) + ) + mock_get_corosync.assert_not_called() + mock_push_corosync.assert_not_called() ++ mock_add_net.assert_not_called() ++ mock_client_enable.assert_not_called() ++ mock_client_start.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) + def test_enabled_on_cman_if_not_live( +- self, mock_get_corosync, mock_push_corosync ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, mock_push_corosync + ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf +@@ -287,9 +369,15 @@ class AddDeviceTest(TestCase, CmanMixin): + + self.assertEqual(1, mock_get_corosync.call_count) + self.assertEqual(0, mock_push_corosync.call_count) ++ mock_add_net.assert_not_called() ++ mock_client_enable.assert_not_called() ++ mock_client_start.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_success(self, mock_get_corosync, mock_push_corosync): ++ def test_success( ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, 
mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -311,6 +399,70 @@ class AddDeviceTest(TestCase, CmanMixin): + device { + timeout: 12345 + model: net ++ votes: 1 ++ ++ net { ++ algorithm: ffsplit ++ host: 127.0.0.1 ++ } ++ } ++""" ++ ) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.SERVICE_ENABLE_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_START_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ] ++ ) ++ self.assertEqual(1, len(mock_add_net.mock_calls)) ++ self.assertEqual(3, len(mock_client_enable.mock_calls)) ++ self.assertEqual(3, len(mock_client_start.mock_calls)) ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def test_success_file( ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, mock_push_corosync ++ ): ++ original_conf = open(rc("corosync-3nodes.conf")).read() ++ mock_get_corosync.return_value = original_conf ++ lib_env = LibraryEnvironment( ++ self.mock_logger, ++ self.mock_reporter, ++ corosync_conf_data=original_conf ++ ) ++ ++ lib.add_device( ++ lib_env, ++ "net", ++ {"host": "127.0.0.1", "algorithm": "ffsplit"}, ++ {"timeout": "12345"} ++ ) ++ ++ self.assertEqual(1, len(mock_push_corosync.mock_calls)) ++ ac( ++ mock_push_corosync.mock_calls[0][1][0].config.export(), ++ original_conf.replace( ++ "provider: corosync_votequorum\n", ++ """provider: corosync_votequorum ++ ++ device { ++ timeout: 12345 ++ model: net ++ votes: 1 + + net { + algorithm: ffsplit +@@ -321,9 +473,15 @@ class AddDeviceTest(TestCase, CmanMixin): + ) + ) + self.assertEqual([], self.mock_reporter.report_item_list) ++ mock_add_net.assert_not_called() ++ mock_client_enable.assert_not_called() ++ 
mock_client_start.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_invalid_options(self, mock_get_corosync, mock_push_corosync): ++ def test_invalid_options( ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -349,9 +507,15 @@ class AddDeviceTest(TestCase, CmanMixin): + + self.assertEqual(1, mock_get_corosync.call_count) + self.assertEqual(0, mock_push_corosync.call_count) ++ mock_add_net.assert_not_called() ++ mock_client_enable.assert_not_called() ++ mock_client_start.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_invalid_options_forced(self, mock_get_corosync, mock_push_corosync): ++ def test_invalid_options_forced( ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -375,7 +539,21 @@ class AddDeviceTest(TestCase, CmanMixin): + "option_type": "quorum device", + "allowed": ["sync_timeout", "timeout"], + } +- ) ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_ENABLE_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_START_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), + ] + ) + self.assertEqual(1, mock_get_corosync.call_count) +@@ -389,6 +567,7 @@ class AddDeviceTest(TestCase, CmanMixin): + device { + bad_option: bad_value + model: net ++ votes: 1 + + net { + algorithm: ffsplit +@@ -398,9 +577,15 @@ class AddDeviceTest(TestCase, CmanMixin): + """ + ) + ) ++ self.assertEqual(1, len(mock_add_net.mock_calls)) ++ self.assertEqual(3, 
len(mock_client_enable.mock_calls)) ++ self.assertEqual(3, len(mock_client_start.mock_calls)) + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_invalid_model(self, mock_get_corosync, mock_push_corosync): ++ def test_invalid_model( ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -421,9 +606,15 @@ class AddDeviceTest(TestCase, CmanMixin): + + self.assertEqual(1, mock_get_corosync.call_count) + self.assertEqual(0, mock_push_corosync.call_count) ++ mock_add_net.assert_not_called() ++ mock_client_enable.assert_not_called() ++ mock_client_start.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_invalid_model_forced(self, mock_get_corosync, mock_push_corosync): ++ def test_invalid_model_forced( ++ self, mock_client_start, mock_client_enable, mock_add_net, ++ mock_get_corosync, mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -441,7 +632,21 @@ class AddDeviceTest(TestCase, CmanMixin): + "option_value": "bad model", + "allowed_values": ("net", ), + }, +- ) ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_ENABLE_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_START_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), + ] + ) + self.assertEqual(1, mock_get_corosync.call_count) +@@ -458,25 +663,678 @@ class AddDeviceTest(TestCase, CmanMixin): + """ + ) + ) ++ mock_add_net.assert_not_called() # invalid model - don't setup net model ++ self.assertEqual(3, len(mock_client_enable.mock_calls)) ++ self.assertEqual(3, len(mock_client_start.mock_calls)) 
++ ++ ++@mock.patch( ++ "pcs.lib.commands.quorum.qdevice_net.remote_client_import_certificate_and_key" ++) ++@mock.patch("pcs.lib.commands.quorum.qdevice_net.client_cert_request_to_pk12") ++@mock.patch( ++ "pcs.lib.commands.quorum.qdevice_net.remote_sign_certificate_request" ++) ++@mock.patch( ++ "pcs.lib.commands.quorum.qdevice_net.client_generate_certificate_request" ++) ++@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_setup") ++@mock.patch( ++ "pcs.lib.commands.quorum.qdevice_net.remote_qdevice_get_ca_certificate" ++) ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++@mock.patch.object( ++ LibraryEnvironment, ++ "node_communicator", ++ lambda self: "mock_communicator" ++) ++class AddDeviceNetTest(TestCase): ++ #pylint: disable=too-many-instance-attributes ++ def setUp(self): ++ self.mock_logger = mock.MagicMock(logging.Logger) ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ self.qnetd_host = "qnetd_host" ++ self.cluster_name = "clusterName" ++ self.nodes = NodeAddressesList([ ++ NodeAddresses("node1"), ++ NodeAddresses("node2"), ++ ]) ++ self.ca_cert = "CA certificate" ++ self.cert_request = "client certificate request" ++ self.signed_cert = "signed certificate" ++ self.final_cert = "final client certificate" ++ ++ def test_success( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.return_value = self.ca_cert ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.return_value = self.final_cert ++ skip_offline_nodes = False ++ ++ lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( 
++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ { ++ "node": self.nodes[0].label ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ { ++ "node": self.nodes[1].label ++ } ++ ), ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], self.ca_cert), ++ ] ++ self.assertEqual( ++ len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_get_cert_request.assert_called_once_with( ++ "mock_runner", ++ self.cluster_name ++ ) ++ mock_sign_cert_request.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host, ++ self.cert_request, ++ self.cluster_name ++ ) ++ mock_cert_to_pk12.assert_called_once_with( ++ "mock_runner", ++ self.signed_cert ++ ) ++ client_import_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.final_cert), ++ mock.call("mock_communicator", self.nodes[1], self.final_cert), ++ ] ++ self.assertEqual( ++ len(client_import_calls), ++ len(mock_import_cert.mock_calls) ++ ) ++ mock_import_cert.assert_has_calls(client_import_calls) ++ ++ def test_error_get_ca_cert( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.side_effect = NodeCommunicationException( ++ "host", "command", "reason" ++ ) ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.return_value = self.final_cert ++ skip_offline_nodes = False ++ ++ assert_raise_library_error( ++ lambda: lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ 
skip_offline_nodes ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {} ++ ) ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ) ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ mock_client_setup.assert_not_called() ++ mock_get_cert_request.assert_not_called() ++ mock_sign_cert_request.assert_not_called() ++ mock_cert_to_pk12.assert_not_called() ++ mock_import_cert.assert_not_called() ++ ++ ++ def test_error_client_setup( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.return_value = self.ca_cert ++ def raiser(communicator, node, cert): ++ if node == self.nodes[1]: ++ raise NodeCommunicationException("host", "command", "reason") ++ mock_client_setup.side_effect = raiser ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.return_value = self.final_cert ++ skip_offline_nodes = False ++ ++ assert_raise_library_error( ++ lambda: lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {}, ++ report_codes.SKIP_OFFLINE_NODES ++ ) ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {}, ++ report_codes.SKIP_OFFLINE_NODES ++ ), ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], 
self.ca_cert), ++ ] ++ self.assertEqual( ++ len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ mock_client_setup.assert_has_calls(client_setup_calls) ++ ++ def test_error_client_setup_skip_offline( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.return_value = self.ca_cert ++ def raiser(communicator, node, cert): ++ if node == self.nodes[1]: ++ raise NodeCommunicationException("host", "command", "reason") ++ mock_client_setup.side_effect = raiser ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.return_value = self.final_cert ++ skip_offline_nodes = True ++ ++ lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ), ++ ( ++ severity.WARNING, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ { ++ "node": self.nodes[0].label ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ { ++ "node": self.nodes[1].label ++ } ++ ), ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], self.ca_cert), ++ ] ++ self.assertEqual( ++ len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ mock_client_setup.assert_has_calls(client_setup_calls) ++ ++ def test_generate_cert_request_error( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ 
mock_get_ca.return_value = self.ca_cert ++ mock_get_cert_request.side_effect = LibraryError() ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.return_value = self.final_cert ++ skip_offline_nodes = False ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ) ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ) ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], self.ca_cert), ++ ] ++ self.assertEqual( ++ len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_get_cert_request.assert_called_once_with( ++ "mock_runner", ++ self.cluster_name ++ ) ++ mock_sign_cert_request.assert_not_called() ++ mock_cert_to_pk12.assert_not_called() ++ mock_import_cert.assert_not_called() ++ ++ def test_sign_certificate_error( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.return_value = self.ca_cert ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.side_effect = NodeCommunicationException( ++ "host", "command", "reason" ++ ) ++ mock_cert_to_pk12.return_value = self.final_cert ++ skip_offline_nodes = False ++ ++ assert_raise_library_error( ++ lambda: lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {} ++ ) ++ ) ++ ++ assert_report_item_list_equal( ++ 
self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ) ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], self.ca_cert), ++ ] ++ self.assertEqual( ++ len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_get_cert_request.assert_called_once_with( ++ "mock_runner", ++ self.cluster_name ++ ) ++ mock_sign_cert_request.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host, ++ self.cert_request, ++ self.cluster_name ++ ) ++ mock_cert_to_pk12.assert_not_called() ++ mock_import_cert.assert_not_called() ++ ++ def test_certificate_to_pk12_error( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.return_value = self.ca_cert ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.side_effect = LibraryError() ++ skip_offline_nodes = False ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ) ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ) ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], self.ca_cert), ++ ] ++ self.assertEqual( ++ len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ 
mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_get_cert_request.assert_called_once_with( ++ "mock_runner", ++ self.cluster_name ++ ) ++ mock_sign_cert_request.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host, ++ self.cert_request, ++ self.cluster_name ++ ) ++ mock_cert_to_pk12.assert_called_once_with( ++ "mock_runner", ++ self.signed_cert ++ ) ++ mock_import_cert.assert_not_called() ++ ++ def test_client_import_cert_error( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.return_value = self.ca_cert ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.return_value = self.final_cert ++ def raiser(communicator, node, cert): ++ if node == self.nodes[1]: ++ raise NodeCommunicationException("host", "command", "reason") ++ mock_import_cert.side_effect = raiser ++ skip_offline_nodes = False ++ ++ assert_raise_library_error( ++ lambda: lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {}, ++ report_codes.SKIP_OFFLINE_NODES ++ ) ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ { ++ "node": self.nodes[0].label ++ } ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {}, ++ report_codes.SKIP_OFFLINE_NODES ++ ), ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], self.ca_cert), ++ ] ++ self.assertEqual( ++ 
len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_get_cert_request.assert_called_once_with( ++ "mock_runner", ++ self.cluster_name ++ ) ++ mock_sign_cert_request.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host, ++ self.cert_request, ++ self.cluster_name ++ ) ++ mock_cert_to_pk12.assert_called_once_with( ++ "mock_runner", ++ self.signed_cert ++ ) ++ client_import_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.final_cert), ++ mock.call("mock_communicator", self.nodes[1], self.final_cert), ++ ] ++ self.assertEqual( ++ len(client_import_calls), ++ len(mock_import_cert.mock_calls) ++ ) ++ mock_import_cert.assert_has_calls(client_import_calls) ++ ++ def test_client_import_cert_error_skip_offline( ++ self, mock_get_ca, mock_client_setup, mock_get_cert_request, ++ mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert ++ ): ++ mock_get_ca.return_value = self.ca_cert ++ mock_get_cert_request.return_value = self.cert_request ++ mock_sign_cert_request.return_value = self.signed_cert ++ mock_cert_to_pk12.return_value = self.final_cert ++ def raiser(communicator, node, cert): ++ if node == self.nodes[1]: ++ raise NodeCommunicationException("host", "command", "reason") ++ mock_import_cert.side_effect = raiser ++ skip_offline_nodes = True ++ ++ lib._add_device_model_net( ++ self.lib_env, ++ self.qnetd_host, ++ self.cluster_name, ++ self.nodes, ++ skip_offline_nodes ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ { ++ "node": self.nodes[0].label ++ } ++ ), ++ ( ++ severity.WARNING, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {} ++ ), ++ ] ++ ) ++ mock_get_ca.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host ++ ) ++ 
client_setup_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.ca_cert), ++ mock.call("mock_communicator", self.nodes[1], self.ca_cert), ++ ] ++ self.assertEqual( ++ len(client_setup_calls), ++ len(mock_client_setup.mock_calls) ++ ) ++ mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_get_cert_request.assert_called_once_with( ++ "mock_runner", ++ self.cluster_name ++ ) ++ mock_sign_cert_request.assert_called_once_with( ++ "mock_communicator", ++ self.qnetd_host, ++ self.cert_request, ++ self.cluster_name ++ ) ++ mock_cert_to_pk12.assert_called_once_with( ++ "mock_runner", ++ self.signed_cert ++ ) ++ client_import_calls = [ ++ mock.call("mock_communicator", self.nodes[0], self.final_cert), ++ mock.call("mock_communicator", self.nodes[1], self.final_cert), ++ ] ++ self.assertEqual( ++ len(client_import_calls), ++ len(mock_import_cert.mock_calls) ++ ) ++ mock_import_cert.assert_has_calls(client_import_calls) + + + @mock.patch.object(LibraryEnvironment, "push_corosync_conf") + @mock.patch.object(LibraryEnvironment, "get_corosync_conf_data") ++@mock.patch("pcs.lib.commands.quorum._remove_device_model_net") ++@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_disable") ++@mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_stop") + class RemoveDeviceTest(TestCase, CmanMixin): + def setUp(self): + self.mock_logger = mock.MagicMock(logging.Logger) + self.mock_reporter = MockLibraryReportProcessor() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) +- def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync): ++ def test_disabled_on_cman( ++ self, mock_remote_stop, mock_remote_disable, mock_remove_net, ++ mock_get_corosync, mock_push_corosync ++ ): + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) + self.assert_disabled_on_cman(lambda: lib.remove_device(lib_env)) + mock_get_corosync.assert_not_called() + mock_push_corosync.assert_not_called() ++ 
mock_remove_net.assert_not_called() ++ mock_remote_disable.assert_not_called() ++ mock_remote_stop.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) + def test_enabled_on_cman_if_not_live( +- self, mock_get_corosync, mock_push_corosync ++ self, mock_remote_stop, mock_remote_disable, mock_remove_net, ++ mock_get_corosync, mock_push_corosync + ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf +@@ -495,9 +1353,17 @@ class RemoveDeviceTest(TestCase, CmanMixin): + ) + ) + ++ self.assertEqual(1, mock_get_corosync.call_count) ++ self.assertEqual(0, mock_push_corosync.call_count) ++ mock_remove_net.assert_not_called() ++ mock_remote_disable.assert_not_called() ++ mock_remote_stop.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_no_device(self, mock_get_corosync, mock_push_corosync): ++ def test_no_device( ++ self, mock_remote_stop, mock_remote_disable, mock_remove_net, ++ mock_get_corosync, mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -511,10 +1377,17 @@ class RemoveDeviceTest(TestCase, CmanMixin): + ) + ) + +- mock_push_corosync.assert_not_called() ++ self.assertEqual(1, mock_get_corosync.call_count) ++ self.assertEqual(0, mock_push_corosync.call_count) ++ mock_remove_net.assert_not_called() ++ mock_remote_disable.assert_not_called() ++ mock_remote_stop.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_success(self, mock_get_corosync, mock_push_corosync): ++ def test_success( ++ self, mock_remote_stop, mock_remote_disable, mock_remove_net, ++ mock_get_corosync, mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes-qdevice.conf")).read() + no_device_conf = open(rc("corosync-3nodes.conf")).read() + 
mock_get_corosync.return_value = original_conf +@@ -527,7 +1400,213 @@ class RemoveDeviceTest(TestCase, CmanMixin): + mock_push_corosync.mock_calls[0][1][0].config.export(), + no_device_conf + ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.SERVICE_DISABLE_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_STOP_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ] ++ ) ++ self.assertEqual(1, len(mock_remove_net.mock_calls)) ++ self.assertEqual(3, len(mock_remote_disable.mock_calls)) ++ self.assertEqual(3, len(mock_remote_stop.mock_calls)) ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def test_success_file( ++ self, mock_remote_stop, mock_remote_disable, mock_remove_net, ++ mock_get_corosync, mock_push_corosync ++ ): ++ original_conf = open(rc("corosync-3nodes-qdevice.conf")).read() ++ no_device_conf = open(rc("corosync-3nodes.conf")).read() ++ mock_get_corosync.return_value = original_conf ++ lib_env = LibraryEnvironment( ++ self.mock_logger, ++ self.mock_reporter, ++ corosync_conf_data=original_conf ++ ) ++ ++ lib.remove_device(lib_env) ++ ++ self.assertEqual(1, len(mock_push_corosync.mock_calls)) ++ ac( ++ mock_push_corosync.mock_calls[0][1][0].config.export(), ++ no_device_conf ++ ) + self.assertEqual([], self.mock_reporter.report_item_list) ++ mock_remove_net.assert_not_called() ++ mock_remote_disable.assert_not_called() ++ mock_remote_stop.assert_not_called() ++ ++ ++@mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy") ++@mock.patch.object( ++ LibraryEnvironment, ++ "node_communicator", ++ lambda self: "mock_communicator" ++) ++class RemoveDeviceNetTest(TestCase): ++ def setUp(self): ++ self.mock_logger = mock.MagicMock(logging.Logger) ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ self.nodes = 
NodeAddressesList([ ++ NodeAddresses("node1"), ++ NodeAddresses("node2"), ++ ]) ++ ++ def test_success(self, mock_client_destroy): ++ skip_offline_nodes = False ++ ++ lib._remove_device_model_net( ++ self.lib_env, ++ self.nodes, ++ skip_offline_nodes ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE, ++ { ++ "node": self.nodes[0].label ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE, ++ { ++ "node": self.nodes[1].label ++ } ++ ), ++ ] ++ ) ++ client_destroy_calls = [ ++ mock.call("mock_communicator", self.nodes[0]), ++ mock.call("mock_communicator", self.nodes[1]), ++ ] ++ self.assertEqual( ++ len(client_destroy_calls), ++ len(mock_client_destroy.mock_calls) ++ ) ++ mock_client_destroy.assert_has_calls(client_destroy_calls) ++ ++ def test_error_client_destroy(self, mock_client_destroy): ++ def raiser(communicator, node): ++ if node == self.nodes[1]: ++ raise NodeCommunicationException("host", "command", "reason") ++ mock_client_destroy.side_effect = raiser ++ skip_offline_nodes = False ++ ++ assert_raise_library_error( ++ lambda: lib._remove_device_model_net( ++ self.lib_env, ++ self.nodes, ++ skip_offline_nodes ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {}, ++ report_codes.SKIP_OFFLINE_NODES ++ ) ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE, ++ { ++ "node": self.nodes[0].label ++ } ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {}, ++ report_codes.SKIP_OFFLINE_NODES ++ ), ++ ] ++ ) ++ client_destroy_calls = [ ++ mock.call("mock_communicator", self.nodes[0]), ++ 
mock.call("mock_communicator", self.nodes[1]), ++ ] ++ self.assertEqual( ++ len(client_destroy_calls), ++ len(mock_client_destroy.mock_calls) ++ ) ++ mock_client_destroy.assert_has_calls(client_destroy_calls) ++ ++ def test_error_client_destroy_skip_offline(self, mock_client_destroy): ++ def raiser(communicator, node): ++ if node == self.nodes[1]: ++ raise NodeCommunicationException("host", "command", "reason") ++ mock_client_destroy.side_effect = raiser ++ skip_offline_nodes = True ++ ++ lib._remove_device_model_net( ++ self.lib_env, ++ self.nodes, ++ skip_offline_nodes ++ ) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE, ++ { ++ "node": self.nodes[0].label ++ } ++ ), ++ ( ++ severity.WARNING, ++ report_codes.NODE_COMMUNICATION_ERROR, ++ {} ++ ), ++ ] ++ ) ++ client_destroy_calls = [ ++ mock.call("mock_communicator", self.nodes[0]), ++ mock.call("mock_communicator", self.nodes[1]), ++ ] ++ self.assertEqual( ++ len(client_destroy_calls), ++ len(mock_client_destroy.mock_calls) ++ ) ++ mock_client_destroy.assert_has_calls(client_destroy_calls) + + + @mock.patch.object(LibraryEnvironment, "push_corosync_conf") +diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py +index 5700016..4a35fd9 100644 +--- a/pcs/test/test_lib_corosync_config_facade.py ++++ b/pcs/test/test_lib_corosync_config_facade.py +@@ -31,6 +31,7 @@ class FromStringTest(TestCase): + self.assertEqual(facade.__class__, lib.ConfigFacade) + self.assertEqual(facade.config.export(), config) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_parse_error_missing_brace(self): + config = "section {" +@@ -55,6 +56,43 @@ class FromStringTest(TestCase): + ) + + ++class GetClusterNametest(TestCase): ++ def 
test_no_name(self): ++ config = "" ++ facade = lib.ConfigFacade.from_string(config) ++ self.assertEqual("", facade.get_cluster_name()) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) ++ ++ def test_empty_name(self): ++ config = "totem {\n cluster_name:\n}\n" ++ facade = lib.ConfigFacade.from_string(config) ++ self.assertEqual("", facade.get_cluster_name()) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) ++ ++ def test_one_name(self): ++ config = "totem {\n cluster_name: test\n}\n" ++ facade = lib.ConfigFacade.from_string(config) ++ self.assertEqual("test", facade.get_cluster_name()) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) ++ ++ def test_more_names(self): ++ config = "totem {\n cluster_name: test\n cluster_name: TEST\n}\n" ++ facade = lib.ConfigFacade.from_string(config) ++ self.assertEqual("TEST", facade.get_cluster_name()) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) ++ ++ def test_more_sections(self): ++ config = "totem{\ncluster_name:test\n}\ntotem{\ncluster_name:TEST\n}\n" ++ facade = lib.ConfigFacade.from_string(config) ++ self.assertEqual("TEST", facade.get_cluster_name()) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) ++ ++ + class GetNodesTest(TestCase): + def assert_equal_nodelist(self, expected_nodes, real_nodelist): + real_nodes = [ +@@ -69,6 +107,7 @@ class GetNodesTest(TestCase): + nodes = facade.get_nodes() + self.assertEqual(0, len(nodes)) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_empty_nodelist(self): + config = """\ +@@ -79,6 +118,7 @@ nodelist { + nodes = facade.get_nodes() + self.assertEqual(0, len(nodes)) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def 
test_one_nodelist(self): + config = """\ +@@ -107,6 +147,7 @@ nodelist { + nodes + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_more_nodelists(self): + config = """\ +@@ -137,6 +178,7 @@ nodelist { + nodes + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + + class GetQuorumOptionsTest(TestCase): +@@ -146,6 +188,7 @@ class GetQuorumOptionsTest(TestCase): + options = facade.get_quorum_options() + self.assertEqual({}, options) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_empty_quorum(self): + config = """\ +@@ -156,6 +199,7 @@ quorum { + options = facade.get_quorum_options() + self.assertEqual({}, options) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_no_options(self): + config = """\ +@@ -167,6 +211,7 @@ quorum { + options = facade.get_quorum_options() + self.assertEqual({}, options) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_some_options(self): + config = """\ +@@ -191,6 +236,7 @@ quorum { + options + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_option_repeated(self): + config = """\ +@@ -208,6 +254,7 @@ quorum { + options + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_quorum_repeated(self): + config = """\ +@@ -231,6 +278,7 @@ quorum { + options + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + + class SetQuorumOptionsTest(TestCase): +@@ -247,6 +295,7 @@ class SetQuorumOptionsTest(TestCase): + facade = lib.ConfigFacade.from_string(config) + facade.set_quorum_options(reporter, {"wait_for_all": "0"}) + self.assertTrue(facade.need_stopped_cluster) ++ 
self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + """\ + quorum { +@@ -263,6 +312,7 @@ quorum { + facade = lib.ConfigFacade.from_string(config) + facade.set_quorum_options(reporter, {"wait_for_all": ""}) + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual("", facade.config.export()) + self.assertEqual([], reporter.report_item_list) + +@@ -279,6 +329,7 @@ quorum { + facade.set_quorum_options(reporter, expected_options) + + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + test_facade = lib.ConfigFacade.from_string(facade.config.export()) + self.assertEqual( + expected_options, +@@ -309,6 +360,7 @@ quorum { + ) + + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + test_facade = lib.ConfigFacade.from_string(facade.config.export()) + self.assertEqual( + { +@@ -329,6 +381,7 @@ quorum { + facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"}) + + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + "1", + facade.get_quorum_options().get("auto_tie_breaker", None) +@@ -347,6 +400,7 @@ quorum { + facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"}) + + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + "0", + facade.get_quorum_options().get("auto_tie_breaker", None) +@@ -365,6 +419,7 @@ quorum { + facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"}) + + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + "1", + facade.get_quorum_options().get("auto_tie_breaker", None) +@@ -383,6 +438,7 @@ quorum { + facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"}) + + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + "0", + 
facade.get_quorum_options().get("auto_tie_breaker", None) +@@ -421,6 +477,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + lib.ConfigFacade.from_string(config).get_quorum_options(), + facade.get_quorum_options() +@@ -476,6 +533,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + lib.ConfigFacade.from_string(config).get_quorum_options(), + facade.get_quorum_options() +@@ -522,11 +580,60 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) ++ self.assertEqual( ++ lib.ConfigFacade.from_string(config).get_quorum_options(), ++ facade.get_quorum_options() ++ ) ++ ++ def test_qdevice_incompatible_options(self): ++ config = open(rc("corosync-3nodes-qdevice.conf")).read() ++ reporter = MockLibraryReportProcessor() ++ facade = lib.ConfigFacade.from_string(config) ++ options = { ++ "auto_tie_breaker": "1", ++ "last_man_standing": "1", ++ "last_man_standing_window": "250", ++ } ++ assert_raise_library_error( ++ lambda: facade.set_quorum_options(reporter, options), ++ ( ++ severity.ERROR, ++ report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE, ++ { ++ "options_names": [ ++ "auto_tie_breaker", ++ "last_man_standing", ++ "last_man_standing_window", ++ ], ++ } ++ ) ++ ) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual( + lib.ConfigFacade.from_string(config).get_quorum_options(), + facade.get_quorum_options() + ) + ++ def test_qdevice_compatible_options(self): ++ config = open(rc("corosync-3nodes-qdevice.conf")).read() ++ reporter = MockLibraryReportProcessor() ++ facade = lib.ConfigFacade.from_string(config) ++ expected_options = { ++ "wait_for_all": "1", ++ } ++ facade.set_quorum_options(reporter, expected_options) ++ ++ self.assertTrue(facade.need_stopped_cluster) ++ 
self.assertFalse(facade.need_qdevice_reload) ++ test_facade = lib.ConfigFacade.from_string(facade.config.export()) ++ self.assertEqual( ++ expected_options, ++ test_facade.get_quorum_options() ++ ) ++ self.assertEqual([], reporter.report_item_list) ++ + + class HasQuorumDeviceTest(TestCase): + def test_empty_config(self): +@@ -534,12 +641,14 @@ class HasQuorumDeviceTest(TestCase): + facade = lib.ConfigFacade.from_string(config) + self.assertFalse(facade.has_quorum_device()) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_no_device(self): + config = open(rc("corosync.conf")).read() + facade = lib.ConfigFacade.from_string(config) + self.assertFalse(facade.has_quorum_device()) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_empty_device(self): + config = """\ +@@ -551,6 +660,7 @@ quorum { + facade = lib.ConfigFacade.from_string(config) + self.assertFalse(facade.has_quorum_device()) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_device_set(self): + config = """\ +@@ -563,6 +673,7 @@ quorum { + facade = lib.ConfigFacade.from_string(config) + self.assertTrue(facade.has_quorum_device()) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_no_model(self): + config = """\ +@@ -578,6 +689,7 @@ quorum { + facade = lib.ConfigFacade.from_string(config) + self.assertFalse(facade.has_quorum_device()) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + + class GetQuorumDeviceSettingsTest(TestCase): +@@ -589,6 +701,7 @@ class GetQuorumDeviceSettingsTest(TestCase): + facade.get_quorum_device_settings() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_no_device(self): + config = open(rc("corosync.conf")).read() +@@ -598,6 +711,7 @@ class 
GetQuorumDeviceSettingsTest(TestCase): + facade.get_quorum_device_settings() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_empty_device(self): + config = """\ +@@ -612,6 +726,7 @@ quorum { + facade.get_quorum_device_settings() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_no_model(self): + config = """\ +@@ -630,6 +745,7 @@ quorum { + facade.get_quorum_device_settings() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_configured_properly(self): + config = """\ +@@ -649,6 +765,7 @@ quorum { + facade.get_quorum_device_settings() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_more_devices_one_quorum(self): + config = """\ +@@ -681,6 +798,7 @@ quorum { + facade.get_quorum_device_settings() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_more_devices_more_quorum(self): + config = """\ +@@ -715,6 +833,7 @@ quorum { + facade.get_quorum_device_settings() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + + class AddQuorumDeviceTest(TestCase): +@@ -754,9 +873,10 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + +- def test_success_net_minimal(self): ++ def test_success_net_minimal_ffsplit(self): + config = open(rc("corosync-3nodes.conf")).read() + reporter = MockLibraryReportProcessor() + facade = lib.ConfigFacade.from_string(config) +@@ -774,6 +894,7 @@ quorum { + + device { + model: net ++ votes: 1 + + net { + algorithm: ffsplit +@@ -784,55 +905,10 @@ quorum { + facade.config.export() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + 
self.assertEqual([], reporter.report_item_list) + +- def test_success_net_full(self): +- config = open(rc("corosync-3nodes.conf")).read() +- reporter = MockLibraryReportProcessor() +- facade = lib.ConfigFacade.from_string(config) +- facade.add_quorum_device( +- reporter, +- "net", +- { +- "host": "127.0.0.1", +- "port": "4433", +- "algorithm": "ffsplit", +- "connect_timeout": "12345", +- "force_ip_version": "4", +- "tie_breaker": "lowest", +- }, +- { +- "timeout": "23456", +- "sync_timeout": "34567" +- } +- ) +- ac( +- config.replace( +- " provider: corosync_votequorum", +- """\ +- provider: corosync_votequorum +- +- device { +- sync_timeout: 34567 +- timeout: 23456 +- model: net +- +- net { +- algorithm: ffsplit +- connect_timeout: 12345 +- force_ip_version: 4 +- host: 127.0.0.1 +- port: 4433 +- tie_breaker: lowest +- } +- }""" +- ), +- facade.config.export() +- ) +- self.assertFalse(facade.need_stopped_cluster) +- self.assertEqual([], reporter.report_item_list) +- +- def test_succes_net_lms_3node(self): ++ def test_success_net_minimal_lms(self): + config = open(rc("corosync-3nodes.conf")).read() + reporter = MockLibraryReportProcessor() + facade = lib.ConfigFacade.from_string(config) +@@ -860,16 +936,18 @@ quorum { + facade.config.export() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual([], reporter.report_item_list) + +- def test_succes_net_2nodelms_3node(self): ++ def test_success_remove_nodes_votes(self): + config = open(rc("corosync-3nodes.conf")).read() ++ config_votes = config.replace("node {", "node {\nquorum_votes: 2") + reporter = MockLibraryReportProcessor() +- facade = lib.ConfigFacade.from_string(config) ++ facade = lib.ConfigFacade.from_string(config_votes) + facade.add_quorum_device( + reporter, + "net", +- {"host": "127.0.0.1", "algorithm": "2nodelms"}, ++ {"host": "127.0.0.1", "algorithm": "lms"}, + {} + ) + ac( +@@ -890,47 +968,28 @@ quorum { + facade.config.export() + ) + 
self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual([], reporter.report_item_list) + +- def test_succes_net_lms_2node(self): +- config = open(rc("corosync.conf")).read() +- reporter = MockLibraryReportProcessor() +- facade = lib.ConfigFacade.from_string(config) +- facade.add_quorum_device( +- reporter, +- "net", +- {"host": "127.0.0.1", "algorithm": "lms"}, +- {} +- ) +- ac( +- config.replace( +- " provider: corosync_votequorum", +- """\ +- provider: corosync_votequorum +- +- device { +- model: net +- +- net { +- algorithm: 2nodelms +- host: 127.0.0.1 +- } +- }""" +- ).replace(" two_node: 1\n", ""), +- facade.config.export() +- ) +- self.assertFalse(facade.need_stopped_cluster) +- self.assertEqual([], reporter.report_item_list) +- +- def test_succes_net_2nodelms_2node(self): +- config = open(rc("corosync.conf")).read() ++ def test_success_net_full(self): ++ config = open(rc("corosync-3nodes.conf")).read() + reporter = MockLibraryReportProcessor() + facade = lib.ConfigFacade.from_string(config) + facade.add_quorum_device( + reporter, + "net", +- {"host": "127.0.0.1", "algorithm": "2nodelms"}, +- {} ++ { ++ "host": "127.0.0.1", ++ "port": "4433", ++ "algorithm": "ffsplit", ++ "connect_timeout": "12345", ++ "force_ip_version": "4", ++ "tie_breaker": "lowest", ++ }, ++ { ++ "timeout": "23456", ++ "sync_timeout": "34567" ++ } + ) + ac( + config.replace( +@@ -939,17 +998,25 @@ quorum { + provider: corosync_votequorum + + device { ++ sync_timeout: 34567 ++ timeout: 23456 + model: net ++ votes: 1 + + net { +- algorithm: 2nodelms ++ algorithm: ffsplit ++ connect_timeout: 12345 ++ force_ip_version: 4 + host: 127.0.0.1 ++ port: 4433 ++ tie_breaker: lowest + } + }""" +- ).replace(" two_node: 1\n", ""), ++ ), + facade.config.export() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual([], reporter.report_item_list) + + def 
test_remove_conflicting_options(self): +@@ -982,6 +1049,7 @@ quorum { + + device { + model: net ++ votes: 1 + + net { + algorithm: ffsplit +@@ -994,6 +1062,7 @@ quorum { + facade.config.export() + ) + self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual([], reporter.report_item_list) + + def test_remove_old_configuration(self): +@@ -1030,6 +1099,7 @@ quorum { + + device { + model: net ++ votes: 1 + + net { + algorithm: ffsplit +@@ -1042,6 +1112,7 @@ quorum { + facade.config.export() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + self.assertEqual([], reporter.report_item_list) + + def test_bad_model(self): +@@ -1062,6 +1133,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_bad_model_forced(self): +@@ -1082,6 +1154,7 @@ quorum { + facade.config.export() + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + assert_report_item_list_equal( + reporter.report_item_list, + [ +@@ -1115,6 +1188,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_bad_options_net(self): +@@ -1147,7 +1221,7 @@ quorum { + { + "option_name": "algorithm", + "option_value": "bad algorithm", +- "allowed_values": ("2nodelms", "ffsplit", "lms"), ++ "allowed_values": ("ffsplit", "lms"), + }, + report_codes.FORCE_OPTIONS + ), +@@ -1254,6 +1328,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_mandatory_options_missing_net_forced(self): +@@ -1277,6 +1352,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, 
facade.config.export()) + + def test_mandatory_options_empty_net_forced(self): +@@ -1300,6 +1376,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_bad_options_net_forced(self): +@@ -1326,6 +1403,7 @@ quorum { + force_options=True + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac( + config.replace( + " provider: corosync_votequorum", +@@ -1360,7 +1438,7 @@ quorum { + { + "option_name": "algorithm", + "option_value": "bad algorithm", +- "allowed_values": ("2nodelms", "ffsplit", "lms"), ++ "allowed_values": ("ffsplit", "lms"), + } + ), + ( +@@ -1445,9 +1523,52 @@ quorum { + ] + ) + ++ def test_bad_options_net_disallowed_algorithms(self): ++ config = open(rc("corosync-3nodes.conf")).read() ++ reporter = MockLibraryReportProcessor() ++ facade = lib.ConfigFacade.from_string(config) ++ assert_raise_library_error( ++ lambda: facade.add_quorum_device( ++ reporter, ++ "net", ++ {"host": "127.0.0.1", "algorithm": "test"}, ++ {} ++ ), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "algorithm", ++ "option_value": "test", ++ "allowed_values": ("ffsplit", "lms"), ++ }, ++ report_codes.FORCE_OPTIONS ++ ) ++ ) ++ ++ assert_raise_library_error( ++ lambda: facade.add_quorum_device( ++ reporter, ++ "net", ++ {"host": "127.0.0.1", "algorithm": "2nodelms"}, ++ {} ++ ), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "algorithm", ++ "option_value": "2nodelms", ++ "allowed_values": ("ffsplit", "lms"), ++ }, ++ report_codes.FORCE_OPTIONS ++ ) ++ ) ++ ++ + class UpdateQuorumDeviceTest(TestCase): +- def fixture_add_device(self, config): +- return re.sub( ++ def fixture_add_device(self, config, votes=None): ++ with_device = re.sub( + re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL), + """\ + quorum { +@@ -1465,6 +1586,12 @@ quorum { + }""", + 
config + ) ++ if votes: ++ with_device = with_device.replace( ++ "model: net", ++ "model: net\n votes: {0}".format(votes) ++ ) ++ return with_device + + def test_not_existing(self): + config = open(rc("corosync.conf")).read() +@@ -1483,11 +1610,13 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_success_model_options_net(self): + config = self.fixture_add_device( +- open(rc("corosync-3nodes.conf")).read() ++ open(rc("corosync-3nodes.conf")).read(), ++ votes="1" + ) + reporter = MockLibraryReportProcessor() + facade = lib.ConfigFacade.from_string(config) +@@ -1496,7 +1625,8 @@ quorum { + {"host": "127.0.0.2", "port": "", "algorithm": "ffsplit"}, + {} + ) +- self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertTrue(facade.need_qdevice_reload) + ac( + config.replace( + "host: 127.0.0.1\n port: 4433", +@@ -1506,27 +1636,6 @@ quorum { + ) + self.assertEqual([], reporter.report_item_list) + +- def test_success_net_3node_2nodelms(self): +- config = self.fixture_add_device( +- open(rc("corosync-3nodes.conf")).read() +- ) +- reporter = MockLibraryReportProcessor() +- facade = lib.ConfigFacade.from_string(config) +- facade.update_quorum_device( +- reporter, +- {"algorithm": "2nodelms"}, +- {} +- ) +- self.assertTrue(facade.need_stopped_cluster) +- ac( +- config.replace( +- "port: 4433", +- "port: 4433\n algorithm: lms" +- ), +- facade.config.export() +- ) +- self.assertEqual([], reporter.report_item_list) +- + def test_success_net_doesnt_require_host_and_algorithm(self): + config = self.fixture_add_device( + open(rc("corosync-3nodes.conf")).read() +@@ -1534,7 +1643,8 @@ quorum { + reporter = MockLibraryReportProcessor() + facade = lib.ConfigFacade.from_string(config) + facade.update_quorum_device(reporter, {"port": "4444"}, {}) +- self.assertTrue(facade.need_stopped_cluster) ++ 
self.assertFalse(facade.need_stopped_cluster) ++ self.assertTrue(facade.need_qdevice_reload) + ac( + config.replace( + "host: 127.0.0.1\n port: 4433", +@@ -1572,12 +1682,13 @@ quorum { + { + "option_name": "algorithm", + "option_value": "", +- "allowed_values": ("2nodelms", "ffsplit", "lms") ++ "allowed_values": ("ffsplit", "lms") + }, + report_codes.FORCE_OPTIONS + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_net_required_options_cannot_be_removed_forced(self): +@@ -1605,6 +1716,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_bad_net_options(self): +@@ -1632,7 +1744,7 @@ quorum { + { + "option_name": "algorithm", + "option_value": "bad algorithm", +- "allowed_values": ("2nodelms", "ffsplit", "lms"), ++ "allowed_values": ("ffsplit", "lms"), + }, + report_codes.FORCE_OPTIONS + ), +@@ -1695,6 +1807,7 @@ quorum { + ), + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_bad_net_options_forced(self): +@@ -1716,7 +1829,8 @@ quorum { + {}, + force_options=True + ) +- self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertTrue(facade.need_qdevice_reload) + ac( + config.replace( + " host: 127.0.0.1\n port: 4433", +@@ -1740,7 +1854,7 @@ quorum { + { + "option_name": "algorithm", + "option_value": "bad algorithm", +- "allowed_values": ("2nodelms", "ffsplit", "lms"), ++ "allowed_values": ("ffsplit", "lms"), + }, + ), + ( +@@ -1809,7 +1923,8 @@ quorum { + {}, + {"timeout": "", "sync_timeout": "23456"} + ) +- self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertTrue(facade.need_qdevice_reload) + ac( + config.replace( + "timeout: 12345\n model: net", +@@ 
-1830,7 +1945,8 @@ quorum { + {"port": "4444"}, + {"timeout": "23456"} + ) +- self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertTrue(facade.need_qdevice_reload) + ac( + config + .replace("port: 4433", "port: 4444") +@@ -1898,6 +2014,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_bad_generic_options_cannot_force_model(self): +@@ -1924,6 +2041,7 @@ quorum { + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac(config, facade.config.export()) + + def test_bad_generic_options_forced(self): +@@ -1942,7 +2060,8 @@ quorum { + }, + force_options=True + ) +- self.assertTrue(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_stopped_cluster) ++ self.assertTrue(facade.need_qdevice_reload) + ac( + config.replace( + " timeout: 12345\n model: net", +@@ -2001,6 +2120,7 @@ class RemoveQuorumDeviceTest(TestCase): + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_no_device(self): + config = open(rc("corosync-3nodes.conf")).read() +@@ -2014,6 +2134,7 @@ class RemoveQuorumDeviceTest(TestCase): + ) + ) + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + + def test_remove_all_devices(self): + config_no_devices = open(rc("corosync-3nodes.conf")).read() +@@ -2054,6 +2175,7 @@ quorum { + facade = lib.ConfigFacade.from_string(config) + facade.remove_quorum_device() + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac( + config_no_devices, + facade.config.export() +@@ -2082,6 +2204,7 @@ quorum { + facade = lib.ConfigFacade.from_string(config) + facade.remove_quorum_device() + self.assertFalse(facade.need_stopped_cluster) ++ self.assertFalse(facade.need_qdevice_reload) + ac( + config_no_devices, 
+ facade.config.export() +diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py +index 4878136..96fe235 100644 +--- a/pcs/test/test_lib_corosync_live.py ++++ b/pcs/test/test_lib_corosync_live.py +@@ -47,6 +47,22 @@ class GetLocalCorosyncConfTest(TestCase): + ) + + ++class SetRemoteCorosyncConfTest(TestCase): ++ def test_success(self): ++ config = "test {\nconfig: data\n}\n" ++ node = NodeAddresses("node1") ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_node.return_value = "dummy return" ++ ++ lib.set_remote_corosync_conf(mock_communicator, node, config) ++ ++ mock_communicator.call_node.assert_called_once_with( ++ node, ++ "remote/set_corosync_conf", ++ "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A" ++ ) ++ ++ + class ReloadConfigTest(TestCase): + def path(self, name): + return os.path.join(settings.corosync_binaries, name) +@@ -85,17 +101,43 @@ class ReloadConfigTest(TestCase): + ]) + + +-class SetRemoteCorosyncConfTest(TestCase): ++class GetQuorumStatusTextTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ self.quorum_tool = "/usr/sbin/corosync-quorumtool" ++ + def test_success(self): +- config = "test {\nconfig: data\n}\n" +- node = NodeAddresses("node1") +- mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) +- mock_communicator.call_node.return_value = "dummy return" ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.get_quorum_status_text(self.mock_runner) ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ self.quorum_tool, "-p" ++ ]) + +- lib.set_remote_corosync_conf(mock_communicator, node, config) ++ def test_success_with_retval_1(self): ++ self.mock_runner.run.return_value = ("status info", 1) ++ self.assertEqual( ++ "status info", ++ lib.get_quorum_status_text(self.mock_runner) ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ self.quorum_tool, "-p" ++ 
]) + +- mock_communicator.call_node.assert_called_once_with( +- node, +- "remote/set_corosync_conf", +- "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A" ++ def test_error(self): ++ self.mock_runner.run.return_value = ("status error", 2) ++ assert_raise_library_error( ++ lambda: lib.get_quorum_status_text(self.mock_runner), ++ ( ++ severity.ERROR, ++ report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR, ++ { ++ "reason": "status error", ++ } ++ ) + ) ++ self.mock_runner.run.assert_called_once_with([ ++ self.quorum_tool, "-p" ++ ]) +diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py +new file mode 100644 +index 0000000..e0332f1 +--- /dev/null ++++ b/pcs/test/test_lib_corosync_qdevice_client.py +@@ -0,0 +1,60 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.assertions import assert_raise_library_error ++ ++from pcs.common import report_codes ++from pcs.lib.errors import ReportItemSeverity as severity ++from pcs.lib.external import CommandRunner ++ ++import pcs.lib.corosync.qdevice_client as lib ++ ++ ++class GetStatusTextTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ self.qdevice_tool = "/usr/sbin/corosync-qdevice-tool" ++ ++ def test_success(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.get_status_text(self.mock_runner) ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ self.qdevice_tool, "-s" ++ ]) ++ ++ def test_success_verbose(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.get_status_text(self.mock_runner, True) ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ self.qdevice_tool, "-s", "-v" ++ ]) ++ ++ def test_error(self): ++ self.mock_runner.run.return_value = 
("status error", 1) ++ assert_raise_library_error( ++ lambda: lib.get_status_text(self.mock_runner), ++ ( ++ severity.ERROR, ++ report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR, ++ { ++ "reason": "status error", ++ } ++ ) ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ self.qdevice_tool, "-s" ++ ]) ++ +diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py +index 38bc9c8..3d473f7 100644 +--- a/pcs/test/test_lib_corosync_qdevice_net.py ++++ b/pcs/test/test_lib_corosync_qdevice_net.py +@@ -7,18 +7,40 @@ from __future__ import ( + + from unittest import TestCase + ++import base64 ++import os.path ++ + from pcs.test.tools.pcs_mock import mock + from pcs.test.tools.assertions import assert_raise_library_error ++from pcs.test.tools.misc import get_test_resource + ++from pcs import settings + from pcs.common import report_codes +-from pcs.lib.errors import ReportItemSeverity as severity +-from pcs.lib.external import CommandRunner ++from pcs.lib import reports ++from pcs.lib.errors import ReportItemSeverity as severity, LibraryError ++from pcs.lib.external import ( ++ CommandRunner, ++ NodeCommunicator, ++ NodeCommunicationException, ++) + + import pcs.lib.corosync.qdevice_net as lib + + +-_qnetd_cert_dir = "/etc/corosync/qdevice/net/qnetd/nssdb" +-_qnetd_tool = "/usr/sbin/corosync-qnetd-certutil" ++_qnetd_cert_dir = "/etc/corosync/qnetd/nssdb" ++_qnetd_cert_tool = "/usr/bin/corosync-qnetd-certutil" ++_qnetd_tool = "/usr/bin/corosync-qnetd-tool" ++_client_cert_dir = "/etc/corosync/qdevice/net/nssdb" ++_client_cert_tool = "/usr/sbin/corosync-qdevice-net-certutil" ++ ++def cert_to_url(cert): ++ return base64.b64encode(cert).decode("utf-8").replace("=", "%3D") ++ ++class CertificateTestCase(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_tmpfile = mock.MagicMock() ++ self.mock_tmpfile.name = "tmpfile path" + + 
@mock.patch("pcs.lib.corosync.qdevice_net.external.is_dir_nonempty") + class QdeviceSetupTest(TestCase): +@@ -32,7 +54,7 @@ class QdeviceSetupTest(TestCase): + lib.qdevice_setup(self.mock_runner) + + mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir) +- self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"]) ++ self.mock_runner.run.assert_called_once_with([_qnetd_cert_tool, "-i"]) + + def test_cert_db_exists(self, mock_is_dir_nonempty): + mock_is_dir_nonempty.return_value = True +@@ -47,7 +69,7 @@ class QdeviceSetupTest(TestCase): + ) + + mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir) +- self.mock_runner.assert_not_called() ++ self.mock_runner.run.assert_not_called() + + def test_init_tool_fail(self, mock_is_dir_nonempty): + mock_is_dir_nonempty.return_value = False +@@ -66,16 +88,24 @@ class QdeviceSetupTest(TestCase): + ) + + mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir) +- self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"]) ++ self.mock_runner.run.assert_called_once_with([_qnetd_cert_tool, "-i"]) + + + @mock.patch("pcs.lib.corosync.qdevice_net.shutil.rmtree") ++@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_initialized") + class QdeviceDestroyTest(TestCase): +- def test_success(self, mock_rmtree): ++ def test_success(self, mock_initialized, mock_rmtree): ++ mock_initialized.return_value = True + lib.qdevice_destroy() + mock_rmtree.assert_called_once_with(_qnetd_cert_dir) + +- def test_cert_dir_rm_error(self, mock_rmtree): ++ def test_not_initialized(self, mock_initialized, mock_rmtree): ++ mock_initialized.return_value = False ++ lib.qdevice_destroy() ++ mock_rmtree.assert_not_called() ++ ++ def test_cert_dir_rm_error(self, mock_initialized, mock_rmtree): ++ mock_initialized.return_value = True + mock_rmtree.side_effect = EnvironmentError("test errno", "test message") + assert_raise_library_error( + lib.qdevice_destroy, +@@ -89,3 +119,920 @@ class QdeviceDestroyTest(TestCase): + ) + ) + 
mock_rmtree.assert_called_once_with(_qnetd_cert_dir) ++ ++ ++class QdeviceStatusGenericTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ ++ def test_success(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.qdevice_status_generic_text(self.mock_runner) ++ ) ++ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"]) ++ ++ def test_success_verbose(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.qdevice_status_generic_text(self.mock_runner, True) ++ ) ++ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s", "-v"]) ++ ++ def test_error(self): ++ self.mock_runner.run.return_value = ("status error", 1) ++ assert_raise_library_error( ++ lambda: lib.qdevice_status_generic_text(self.mock_runner), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_GET_STATUS_ERROR, ++ { ++ "model": "net", ++ "reason": "status error", ++ } ++ ) ++ ) ++ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"]) ++ ++ ++class QdeviceStatusClusterTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ ++ def test_success(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.qdevice_status_cluster_text(self.mock_runner) ++ ) ++ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"]) ++ ++ def test_success_verbose(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.qdevice_status_cluster_text(self.mock_runner, verbose=True) ++ ) ++ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l", "-v"]) ++ ++ def test_success_cluster(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.qdevice_status_cluster_text(self.mock_runner, "cluster") ++ ) ++ 
self.mock_runner.run.assert_called_once_with([ ++ _qnetd_tool, "-l", "-c", "cluster" ++ ]) ++ ++ def test_success_cluster_verbose(self): ++ self.mock_runner.run.return_value = ("status info", 0) ++ self.assertEqual( ++ "status info", ++ lib.qdevice_status_cluster_text(self.mock_runner, "cluster", True) ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _qnetd_tool, "-l", "-v", "-c", "cluster" ++ ]) ++ ++ def test_error(self): ++ self.mock_runner.run.return_value = ("status error", 1) ++ assert_raise_library_error( ++ lambda: lib.qdevice_status_cluster_text(self.mock_runner), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_GET_STATUS_ERROR, ++ { ++ "model": "net", ++ "reason": "status error", ++ } ++ ) ++ ) ++ self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"]) ++ ++ ++@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate") ++@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile") ++class QdeviceSignCertificateRequestTest(CertificateTestCase): ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.qdevice_initialized", ++ lambda: True ++ ) ++ def test_success(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output", 0) ++ mock_get_cert.return_value = "new certificate".encode("utf-8") ++ ++ result = lib.qdevice_sign_certificate_request( ++ self.mock_runner, ++ "certificate request", ++ "clusterName" ++ ) ++ self.assertEqual(result, mock_get_cert.return_value) ++ ++ mock_tmp_store.assert_called_once_with( ++ "certificate request", ++ reports.qdevice_certificate_sign_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _qnetd_cert_tool, ++ "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName" ++ ]) ++ mock_get_cert.assert_called_once_with( ++ "tool output", ++ reports.qdevice_certificate_sign_error ++ ) ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.qdevice_initialized", ++ lambda: False ++ ) ++ def test_not_initialized(self, 
mock_tmp_store, mock_get_cert): ++ assert_raise_library_error( ++ lambda: lib.qdevice_sign_certificate_request( ++ self.mock_runner, ++ "certificate request", ++ "clusterName" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_NOT_INITIALIZED, ++ { ++ "model": "net", ++ } ++ ) ++ ) ++ mock_tmp_store.assert_not_called() ++ self.mock_runner.run.assert_not_called() ++ mock_get_cert.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.qdevice_initialized", ++ lambda: True ++ ) ++ def test_input_write_error(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.side_effect = LibraryError ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib.qdevice_sign_certificate_request( ++ self.mock_runner, ++ "certificate request", ++ "clusterName" ++ ) ++ ) ++ ++ self.mock_runner.run.assert_not_called() ++ mock_get_cert.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.qdevice_initialized", ++ lambda: True ++ ) ++ def test_sign_error(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output error", 1) ++ ++ assert_raise_library_error( ++ lambda: lib.qdevice_sign_certificate_request( ++ self.mock_runner, ++ "certificate request", ++ "clusterName" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR, ++ { ++ "reason": "tool output error", ++ } ++ ) ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "certificate request", ++ reports.qdevice_certificate_sign_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _qnetd_cert_tool, ++ "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName" ++ ]) ++ mock_get_cert.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.qdevice_initialized", ++ lambda: True ++ ) ++ def test_output_read_error(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output", 0) ++ 
mock_get_cert.side_effect = LibraryError ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib.qdevice_sign_certificate_request( ++ self.mock_runner, ++ "certificate request", ++ "clusterName" ++ ) ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "certificate request", ++ reports.qdevice_certificate_sign_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _qnetd_cert_tool, ++ "-s", "-c", self.mock_tmpfile.name, "-n", "clusterName" ++ ]) ++ mock_get_cert.assert_called_once_with( ++ "tool output", ++ reports.qdevice_certificate_sign_error ++ ) ++ ++ ++@mock.patch("pcs.lib.corosync.qdevice_net.shutil.rmtree") ++@mock.patch("pcs.lib.corosync.qdevice_net.client_initialized") ++class ClientDestroyTest(TestCase): ++ def test_success(self, mock_initialized, mock_rmtree): ++ mock_initialized.return_value = True ++ lib.client_destroy() ++ mock_rmtree.assert_called_once_with(_client_cert_dir) ++ ++ def test_not_initialized(self, mock_initialized, mock_rmtree): ++ mock_initialized.return_value = False ++ lib.client_destroy() ++ mock_rmtree.assert_not_called() ++ ++ def test_cert_dir_rm_error(self, mock_initialized, mock_rmtree): ++ mock_initialized.return_value = True ++ mock_rmtree.side_effect = EnvironmentError("test errno", "test message") ++ assert_raise_library_error( ++ lib.client_destroy, ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_DESTROY_ERROR, ++ { ++ "model": "net", ++ "reason": "test message", ++ } ++ ) ++ ) ++ mock_rmtree.assert_called_once_with(_client_cert_dir) ++ ++ ++class ClientSetupTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ self.original_path = settings.corosync_qdevice_net_client_certs_dir ++ settings.corosync_qdevice_net_client_certs_dir = get_test_resource( ++ "qdevice-certs" ++ ) ++ self.ca_file_path = os.path.join( ++ settings.corosync_qdevice_net_client_certs_dir, ++ settings.corosync_qdevice_net_client_ca_file_name ++ ) ++ ++ def tearDown(self): ++ 
settings.corosync_qdevice_net_client_certs_dir = self.original_path ++ ++ @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy") ++ def test_success(self, mock_destroy): ++ self.mock_runner.run.return_value = ("tool output", 0) ++ ++ lib.client_setup(self.mock_runner, "certificate data".encode("utf-8")) ++ ++ self.assertEqual( ++ "certificate data".encode("utf-8"), ++ open(self.ca_file_path, "rb").read() ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-i", "-c", self.ca_file_path ++ ]) ++ mock_destroy.assert_called_once_with() ++ ++ @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy") ++ def test_init_error(self, mock_destroy): ++ self.mock_runner.run.return_value = ("tool output error", 1) ++ ++ assert_raise_library_error( ++ lambda: lib.client_setup( ++ self.mock_runner, ++ "certificate data".encode("utf-8") ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_INITIALIZATION_ERROR, ++ { ++ "model": "net", ++ "reason": "tool output error", ++ } ++ ) ++ ) ++ ++ self.assertEqual( ++ "certificate data".encode("utf-8"), ++ open(self.ca_file_path, "rb").read() ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-i", "-c", self.ca_file_path ++ ]) ++ mock_destroy.assert_called_once_with() ++ ++ ++@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate") ++class ClientGenerateCertificateRequestTest(CertificateTestCase): ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_success(self, mock_get_cert): ++ self.mock_runner.run.return_value = ("tool output", 0) ++ mock_get_cert.return_value = "new certificate".encode("utf-8") ++ ++ result = lib.client_generate_certificate_request( ++ self.mock_runner, ++ "clusterName" ++ ) ++ self.assertEqual(result, mock_get_cert.return_value) ++ ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-r", "-n", "clusterName" ++ ]) ++ self.assertEqual(1, len(mock_get_cert.mock_calls)) ++ 
self.assertEqual( ++ "tool output", ++ mock_get_cert.call_args[0][0] ++ ) ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: False ++ ) ++ def test_not_initialized(self, mock_get_cert): ++ assert_raise_library_error( ++ lambda: lib.client_generate_certificate_request( ++ self.mock_runner, ++ "clusterName" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_NOT_INITIALIZED, ++ { ++ "model": "net", ++ } ++ ) ++ ) ++ self.mock_runner.run.assert_not_called() ++ mock_get_cert.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_tool_error(self, mock_get_cert): ++ self.mock_runner.run.return_value = ("tool output error", 1) ++ ++ assert_raise_library_error( ++ lambda: lib.client_generate_certificate_request( ++ self.mock_runner, ++ "clusterName" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_INITIALIZATION_ERROR, ++ { ++ "model": "net", ++ "reason": "tool output error", ++ } ++ ) ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-r", "-n", "clusterName" ++ ]) ++ mock_get_cert.assert_not_called() ++ ++ ++@mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate") ++@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile") ++class ClientCertRequestToPk12Test(CertificateTestCase): ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_success(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output", 0) ++ mock_get_cert.return_value = "new certificate".encode("utf-8") ++ ++ result = lib.client_cert_request_to_pk12( ++ self.mock_runner, ++ "certificate request" ++ ) ++ self.assertEqual(result, mock_get_cert.return_value) ++ ++ mock_tmp_store.assert_called_once_with( ++ "certificate request", ++ reports.qdevice_certificate_import_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ 
_client_cert_tool, "-M", "-c", self.mock_tmpfile.name ++ ]) ++ mock_get_cert.assert_called_once_with( ++ "tool output", ++ reports.qdevice_certificate_import_error ++ ) ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: False ++ ) ++ def test_not_initialized(self, mock_tmp_store, mock_get_cert): ++ assert_raise_library_error( ++ lambda: lib.client_cert_request_to_pk12( ++ self.mock_runner, ++ "certificate request" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_NOT_INITIALIZED, ++ { ++ "model": "net", ++ } ++ ) ++ ) ++ mock_tmp_store.assert_not_called() ++ self.mock_runner.run.assert_not_called() ++ mock_get_cert.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_input_write_error(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.side_effect = LibraryError ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib.client_cert_request_to_pk12( ++ self.mock_runner, ++ "certificate request" ++ ) ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "certificate request", ++ reports.qdevice_certificate_import_error ++ ) ++ self.mock_runner.run.assert_not_called() ++ mock_get_cert.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_transform_error(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output error", 1) ++ ++ assert_raise_library_error( ++ lambda: lib.client_cert_request_to_pk12( ++ self.mock_runner, ++ "certificate request" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, ++ { ++ "reason": "tool output error", ++ } ++ ) ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "certificate request", ++ reports.qdevice_certificate_import_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-M", "-c", self.mock_tmpfile.name ++ ]) 
++ mock_get_cert.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_output_read_error(self, mock_tmp_store, mock_get_cert): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output", 0) ++ mock_get_cert.side_effect = LibraryError ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib.client_cert_request_to_pk12( ++ self.mock_runner, ++ "certificate request" ++ ) ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "certificate request", ++ reports.qdevice_certificate_import_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-M", "-c", self.mock_tmpfile.name ++ ]) ++ mock_get_cert.assert_called_once_with( ++ "tool output", ++ reports.qdevice_certificate_import_error ++ ) ++ ++ ++@mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile") ++class ClientImportCertificateAndKeyTest(CertificateTestCase): ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_success(self, mock_tmp_store): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output", 0) ++ ++ lib.client_import_certificate_and_key( ++ self.mock_runner, ++ "pk12 certificate" ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "pk12 certificate", ++ reports.qdevice_certificate_import_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-m", "-c", self.mock_tmpfile.name ++ ]) ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: False ++ ) ++ def test_not_initialized(self, mock_tmp_store): ++ assert_raise_library_error( ++ lambda: lib.client_import_certificate_and_key( ++ self.mock_runner, ++ "pk12 certificate" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_NOT_INITIALIZED, ++ { ++ "model": "net", ++ } ++ ) ++ ) ++ ++ mock_tmp_store.assert_not_called() ++ 
self.mock_runner.run.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_input_write_error(self, mock_tmp_store): ++ mock_tmp_store.side_effect = LibraryError ++ ++ self.assertRaises( ++ LibraryError, ++ lambda: lib.client_import_certificate_and_key( ++ self.mock_runner, ++ "pk12 certificate" ++ ) ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "pk12 certificate", ++ reports.qdevice_certificate_import_error ++ ) ++ self.mock_runner.run.assert_not_called() ++ ++ @mock.patch( ++ "pcs.lib.corosync.qdevice_net.client_initialized", ++ lambda: True ++ ) ++ def test_import_error(self, mock_tmp_store): ++ mock_tmp_store.return_value = self.mock_tmpfile ++ self.mock_runner.run.return_value = ("tool output error", 1) ++ ++ assert_raise_library_error( ++ lambda: lib.client_import_certificate_and_key( ++ self.mock_runner, ++ "pk12 certificate" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, ++ { ++ "reason": "tool output error", ++ } ++ ) ++ ) ++ ++ mock_tmp_store.assert_called_once_with( ++ "pk12 certificate", ++ reports.qdevice_certificate_import_error ++ ) ++ mock_tmp_store.assert_called_once_with( ++ "pk12 certificate", ++ reports.qdevice_certificate_import_error ++ ) ++ self.mock_runner.run.assert_called_once_with([ ++ _client_cert_tool, "-m", "-c", self.mock_tmpfile.name ++ ]) ++ ++ ++class RemoteQdeviceGetCaCertificate(TestCase): ++ def test_success(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ expected_result = "abcd".encode("utf-8") ++ mock_communicator.call_host.return_value = base64.b64encode( ++ expected_result ++ ) ++ ++ result = lib.remote_qdevice_get_ca_certificate( ++ mock_communicator, ++ "qdevice host" ++ ) ++ self.assertEqual(result, expected_result) ++ ++ mock_communicator.call_host.assert_called_once_with( ++ "qdevice host", ++ "remote/qdevice_net_get_ca_certificate", ++ None ++ ) ++ ++ def test_decode_error(self): ++ 
mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_host.return_value = "error" ++ ++ assert_raise_library_error( ++ lambda: lib.remote_qdevice_get_ca_certificate( ++ mock_communicator, ++ "qdevice host" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_RESPONSE_FORMAT, ++ { ++ "node": "qdevice host", ++ } ++ ) ++ ) ++ ++ def test_comunication_error(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_host.side_effect = NodeCommunicationException( ++ "qdevice host", "command", "reason" ++ ) ++ ++ self.assertRaises( ++ NodeCommunicationException, ++ lambda: lib.remote_qdevice_get_ca_certificate( ++ mock_communicator, ++ "qdevice host" ++ ) ++ ) ++ ++ ++class RemoteClientSetupTest(TestCase): ++ def test_success(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ node = "node address" ++ ca_cert = "CA certificate".encode("utf-8") ++ ++ lib.remote_client_setup(mock_communicator, node, ca_cert) ++ ++ mock_communicator.call_node.assert_called_once_with( ++ node, ++ "remote/qdevice_net_client_init_certificate_storage", ++ "ca_certificate={0}".format( ++ cert_to_url(ca_cert) ++ ) ++ ) ++ ++ def test_comunication_error(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_node.side_effect = NodeCommunicationException( ++ "node address", "command", "reason" ++ ) ++ ++ self.assertRaises( ++ NodeCommunicationException, ++ lambda: lib.remote_client_setup( ++ mock_communicator, ++ "node address", ++ "ca cert".encode("utf-8") ++ ) ++ ) ++ ++ ++class RemoteSignCertificateRequestTest(TestCase): ++ def test_success(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ cert_request = "request".encode("utf-8") ++ expected_result = "abcd".encode("utf-8") ++ host = "qdevice host" ++ cluster_name = "ClusterName" ++ mock_communicator.call_host.return_value = base64.b64encode( ++ expected_result ++ ) ++ ++ result = 
lib.remote_sign_certificate_request( ++ mock_communicator, ++ host, ++ cert_request, ++ cluster_name ++ ) ++ self.assertEqual(result, expected_result) ++ ++ mock_communicator.call_host.assert_called_once_with( ++ host, ++ "remote/qdevice_net_sign_node_certificate", ++ "certificate_request={0}&cluster_name={1}".format( ++ cert_to_url(cert_request), ++ cluster_name ++ ) ++ ) ++ ++ def test_decode_error(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_host.return_value = "error" ++ ++ assert_raise_library_error( ++ lambda: lib.remote_sign_certificate_request( ++ mock_communicator, ++ "qdevice host", ++ "cert request".encode("utf-8"), ++ "cluster name" ++ ), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_RESPONSE_FORMAT, ++ { ++ "node": "qdevice host", ++ } ++ ) ++ ) ++ ++ def test_comunication_error(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_host.side_effect = NodeCommunicationException( ++ "qdevice host", "command", "reason" ++ ) ++ ++ self.assertRaises( ++ NodeCommunicationException, ++ lambda: lib.remote_sign_certificate_request( ++ mock_communicator, ++ "qdevice host", ++ "cert request".encode("utf-8"), ++ "cluster name" ++ ) ++ ) ++ ++ ++class RemoteClientImportCertificateAndKeyTest(TestCase): ++ def test_success(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ node = "node address" ++ pk12_cert = "pk12 certificate".encode("utf-8") ++ ++ lib.remote_client_import_certificate_and_key( ++ mock_communicator, ++ node, ++ pk12_cert ++ ) ++ ++ mock_communicator.call_node.assert_called_once_with( ++ node, ++ "remote/qdevice_net_client_import_certificate", ++ "certificate={0}".format( ++ cert_to_url(pk12_cert) ++ ) ++ ) ++ ++ def test_comunication_error(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_node.side_effect = NodeCommunicationException( ++ "node address", "command", "reason" ++ ) ++ 
++ self.assertRaises( ++ NodeCommunicationException, ++ lambda: lib.remote_client_import_certificate_and_key( ++ mock_communicator, ++ "node address", ++ "pk12 cert".encode("utf-8") ++ ) ++ ) ++ ++ ++class RemoteClientDestroy(TestCase): ++ def test_success(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ node = "node address" ++ ++ lib.remote_client_destroy(mock_communicator, node) ++ ++ mock_communicator.call_node.assert_called_once_with( ++ node, ++ "remote/qdevice_net_client_destroy", ++ None ++ ) ++ ++ def test_comunication_error(self): ++ mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ mock_communicator.call_node.side_effect = NodeCommunicationException( ++ "node address", "command", "reason" ++ ) ++ ++ self.assertRaises( ++ NodeCommunicationException, ++ lambda: lib.remote_client_destroy(mock_communicator, "node address") ++ ) ++ ++ ++class GetOutputCertificateTest(TestCase): ++ def setUp(self): ++ self.file_path = get_test_resource("qdevice-certs/qnetd-cacert.crt") ++ self.file_data = open(self.file_path, "rb").read() ++ ++ def test_success(self): ++ cert_tool_output = """ ++some line ++Certificate stored in {0} ++some other line ++ """.format(self.file_path) ++ report_func = mock.MagicMock() ++ ++ self.assertEqual( ++ self.file_data, ++ lib._get_output_certificate(cert_tool_output, report_func) ++ ) ++ report_func.assert_not_called() ++ ++ def test_success_request(self): ++ cert_tool_output = """ ++some line ++Certificate request stored in {0} ++some other line ++ """.format(self.file_path) ++ report_func = mock.MagicMock() ++ ++ self.assertEqual( ++ self.file_data, ++ lib._get_output_certificate(cert_tool_output, report_func) ++ ) ++ report_func.assert_not_called() ++ ++ def test_message_not_found(self): ++ cert_tool_output = "some rubbish output" ++ report_func = reports.qdevice_certificate_import_error ++ ++ assert_raise_library_error( ++ lambda: lib._get_output_certificate( ++ cert_tool_output, ++ report_func 
++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, ++ { ++ "reason": cert_tool_output, ++ } ++ ) ++ ) ++ ++ def test_cannot_read_file(self): ++ cert_tool_output = """ ++some line ++Certificate request stored in {0}.bad ++some other line ++ """.format(self.file_path) ++ report_func = reports.qdevice_certificate_import_error ++ ++ assert_raise_library_error( ++ lambda: lib._get_output_certificate( ++ cert_tool_output, ++ report_func ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, ++ { ++ "reason": "{0}.bad: No such file or directory".format( ++ self.file_path ++ ), ++ } ++ ) ++ ) ++ +diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py +index 95f7a00..c6322b7 100644 +--- a/pcs/test/test_lib_env.py ++++ b/pcs/test/test_lib_env.py +@@ -235,13 +235,24 @@ class LibraryEnvironmentTest(TestCase): + )] + ) + ++ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes") + @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes") + @mock.patch("pcs.lib.env.reload_corosync_config") + @mock.patch("pcs.lib.env.distribute_corosync_conf") + @mock.patch("pcs.lib.env.get_local_corosync_conf") ++ @mock.patch.object( ++ LibraryEnvironment, ++ "node_communicator", ++ lambda self: "mock node communicator" ++ ) ++ @mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock cmd runner" ++ ) + def test_corosync_conf_set( + self, mock_get_corosync, mock_distribute, mock_reload, +- mock_check_offline ++ mock_check_offline, mock_qdevice_reload + ): + corosync_data = "totem {\n version: 2\n}\n" + new_corosync_data = "totem {\n version: 3\n}\n" +@@ -266,8 +277,11 @@ class LibraryEnvironmentTest(TestCase): + self.assertEqual(0, mock_get_corosync.call_count) + mock_check_offline.assert_not_called() + mock_reload.assert_not_called() ++ mock_qdevice_reload.assert_not_called() + ++ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes") + @mock.patch("pcs.lib.env.reload_corosync_config") ++ 
@mock.patch("pcs.lib.env.is_service_running") + @mock.patch("pcs.lib.env.distribute_corosync_conf") + @mock.patch("pcs.lib.env.get_local_corosync_conf") + @mock.patch.object( +@@ -285,12 +299,14 @@ class LibraryEnvironmentTest(TestCase): + "cmd_runner", + lambda self: "mock cmd runner" + ) +- def test_corosync_conf_not_set( +- self, mock_get_corosync, mock_distribute, mock_reload ++ def test_corosync_conf_not_set_online( ++ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload, ++ mock_qdevice_reload + ): + corosync_data = open(rc("corosync.conf")).read() + new_corosync_data = corosync_data.replace("version: 2", "version: 3") + mock_get_corosync.return_value = corosync_data ++ mock_is_running.return_value = True + env = LibraryEnvironment(self.mock_logger, self.mock_reporter) + + self.assertTrue(env.is_corosync_conf_live) +@@ -309,10 +325,120 @@ class LibraryEnvironmentTest(TestCase): + new_corosync_data, + False + ) ++ mock_is_running.assert_called_once_with("mock cmd runner", "corosync") + mock_reload.assert_called_once_with("mock cmd runner") ++ mock_qdevice_reload.assert_not_called() + ++ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes") ++ @mock.patch("pcs.lib.env.reload_corosync_config") ++ @mock.patch("pcs.lib.env.is_service_running") ++ @mock.patch("pcs.lib.env.distribute_corosync_conf") ++ @mock.patch("pcs.lib.env.get_local_corosync_conf") ++ @mock.patch.object( ++ CorosyncConfigFacade, ++ "get_nodes", ++ lambda self: "mock node list" ++ ) ++ @mock.patch.object( ++ LibraryEnvironment, ++ "node_communicator", ++ lambda self: "mock node communicator" ++ ) ++ @mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock cmd runner" ++ ) ++ def test_corosync_conf_not_set_offline( ++ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload, ++ mock_qdevice_reload ++ ): ++ corosync_data = open(rc("corosync.conf")).read() ++ new_corosync_data = corosync_data.replace("version: 2", "version: 3") ++ 
mock_get_corosync.return_value = corosync_data ++ mock_is_running.return_value = False ++ env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ ++ self.assertTrue(env.is_corosync_conf_live) ++ ++ self.assertEqual(corosync_data, env.get_corosync_conf_data()) ++ self.assertEqual(corosync_data, env.get_corosync_conf().config.export()) ++ self.assertEqual(2, mock_get_corosync.call_count) ++ ++ env.push_corosync_conf( ++ CorosyncConfigFacade.from_string(new_corosync_data) ++ ) ++ mock_distribute.assert_called_once_with( ++ "mock node communicator", ++ self.mock_reporter, ++ "mock node list", ++ new_corosync_data, ++ False ++ ) ++ mock_is_running.assert_called_once_with("mock cmd runner", "corosync") ++ mock_reload.assert_not_called() ++ mock_qdevice_reload.assert_not_called() ++ ++ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes") ++ @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes") ++ @mock.patch("pcs.lib.env.reload_corosync_config") ++ @mock.patch("pcs.lib.env.is_service_running") ++ @mock.patch("pcs.lib.env.distribute_corosync_conf") ++ @mock.patch("pcs.lib.env.get_local_corosync_conf") ++ @mock.patch.object( ++ CorosyncConfigFacade, ++ "get_nodes", ++ lambda self: "mock node list" ++ ) ++ @mock.patch.object( ++ LibraryEnvironment, ++ "node_communicator", ++ lambda self: "mock node communicator" ++ ) ++ @mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock cmd runner" ++ ) ++ def test_corosync_conf_not_set_need_qdevice_reload_success( ++ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload, ++ mock_check_offline, mock_qdevice_reload ++ ): ++ corosync_data = open(rc("corosync.conf")).read() ++ new_corosync_data = corosync_data.replace("version: 2", "version: 3") ++ mock_get_corosync.return_value = corosync_data ++ mock_is_running.return_value = True ++ env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ ++ self.assertTrue(env.is_corosync_conf_live) ++ ++ self.assertEqual(corosync_data, 
env.get_corosync_conf_data()) ++ self.assertEqual(corosync_data, env.get_corosync_conf().config.export()) ++ self.assertEqual(2, mock_get_corosync.call_count) ++ ++ conf_facade = CorosyncConfigFacade.from_string(new_corosync_data) ++ conf_facade._need_qdevice_reload = True ++ env.push_corosync_conf(conf_facade) ++ mock_check_offline.assert_not_called() ++ mock_distribute.assert_called_once_with( ++ "mock node communicator", ++ self.mock_reporter, ++ "mock node list", ++ new_corosync_data, ++ False ++ ) ++ mock_reload.assert_called_once_with("mock cmd runner") ++ mock_qdevice_reload.assert_called_once_with( ++ "mock node communicator", ++ self.mock_reporter, ++ "mock node list", ++ False ++ ) ++ ++ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes") + @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes") + @mock.patch("pcs.lib.env.reload_corosync_config") ++ @mock.patch("pcs.lib.env.is_service_running") + @mock.patch("pcs.lib.env.distribute_corosync_conf") + @mock.patch("pcs.lib.env.get_local_corosync_conf") + @mock.patch.object( +@@ -326,12 +452,13 @@ class LibraryEnvironmentTest(TestCase): + lambda self: "mock node communicator" + ) + def test_corosync_conf_not_set_need_offline_success( +- self, mock_get_corosync, mock_distribute, mock_reload, +- mock_check_offline ++ self, mock_get_corosync, mock_distribute, mock_is_running, mock_reload, ++ mock_check_offline, mock_qdevice_reload + ): + corosync_data = open(rc("corosync.conf")).read() + new_corosync_data = corosync_data.replace("version: 2", "version: 3") + mock_get_corosync.return_value = corosync_data ++ mock_is_running.return_value = False + env = LibraryEnvironment(self.mock_logger, self.mock_reporter) + + self.assertTrue(env.is_corosync_conf_live) +@@ -357,7 +484,9 @@ class LibraryEnvironmentTest(TestCase): + False + ) + mock_reload.assert_not_called() ++ mock_qdevice_reload.assert_not_called() + ++ @mock.patch("pcs.lib.env.qdevice_reload_on_nodes") + 
@mock.patch("pcs.lib.env.check_corosync_offline_on_nodes") + @mock.patch("pcs.lib.env.reload_corosync_config") + @mock.patch("pcs.lib.env.distribute_corosync_conf") +@@ -374,7 +503,7 @@ class LibraryEnvironmentTest(TestCase): + ) + def test_corosync_conf_not_set_need_offline_fail( + self, mock_get_corosync, mock_distribute, mock_reload, +- mock_check_offline ++ mock_check_offline, mock_qdevice_reload + ): + corosync_data = open(rc("corosync.conf")).read() + new_corosync_data = corosync_data.replace("version: 2", "version: 3") +@@ -410,6 +539,7 @@ class LibraryEnvironmentTest(TestCase): + ) + mock_distribute.assert_not_called() + mock_reload.assert_not_called() ++ mock_qdevice_reload.assert_not_called() + + @mock.patch("pcs.lib.env.CommandRunner") + def test_cmd_runner_no_options(self, mock_runner): +diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py +index c08b059..929a50d 100644 +--- a/pcs/test/test_lib_external.py ++++ b/pcs/test/test_lib_external.py +@@ -31,7 +31,11 @@ from pcs.test.tools.pcs_mock import mock + + from pcs import settings + from pcs.common import report_codes +-from pcs.lib.errors import ReportItemSeverity as severity ++from pcs.lib import reports ++from pcs.lib.errors import ( ++ LibraryError, ++ ReportItemSeverity as severity ++) + + import pcs.lib.external as lib + +@@ -830,6 +834,126 @@ class NodeCommunicatorExceptionTransformTest(TestCase): + self.assertTrue(raised) + + ++class ParallelCommunicationHelperTest(TestCase): ++ def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() ++ ++ def fixture_raiser(self): ++ def raiser(x, *args, **kwargs): ++ if x == 1: ++ raise lib.NodeConnectionException("node", "command", "reason") ++ elif x == 2: ++ raise LibraryError( ++ reports.corosync_config_distribution_node_error("node") ++ ) ++ return raiser ++ ++ def test_success(self): ++ func = mock.MagicMock() ++ lib.parallel_nodes_communication_helper( ++ func, ++ [([x], {"a": x*2,}) for x in range(3)], ++ 
self.mock_reporter, ++ skip_offline_nodes=False ++ ) ++ expected_calls = [ ++ mock.call(0, a=0), ++ mock.call(1, a=2), ++ mock.call(2, a=4), ++ ] ++ self.assertEqual(len(expected_calls), len(func.mock_calls)) ++ func.assert_has_calls(expected_calls) ++ self.assertEqual(self.mock_reporter.report_item_list, []) ++ ++ def test_errors(self): ++ func = self.fixture_raiser() ++ assert_raise_library_error( ++ lambda: lib.parallel_nodes_communication_helper( ++ func, ++ [([x], {"a": x*2,}) for x in range(4)], ++ self.mock_reporter, ++ skip_offline_nodes=False ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ { ++ "node": "node", ++ "reason": "reason", ++ "command": "command", ++ }, ++ report_codes.SKIP_OFFLINE_NODES ++ ), ++ ( ++ severity.ERROR, ++ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR, ++ { ++ "node": "node", ++ } ++ ) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ { ++ "node": "node", ++ "reason": "reason", ++ "command": "command", ++ }, ++ report_codes.SKIP_OFFLINE_NODES ++ ), ++ ( ++ severity.ERROR, ++ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR, ++ { ++ "node": "node", ++ } ++ ) ++ ] ++ ) ++ ++ def test_errors_skip_offline(self): ++ func = self.fixture_raiser() ++ assert_raise_library_error( ++ lambda: lib.parallel_nodes_communication_helper( ++ func, ++ [([x], {"a": x*2,}) for x in range(4)], ++ self.mock_reporter, ++ skip_offline_nodes=True ++ ), ++ ( ++ severity.ERROR, ++ report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR, ++ { ++ "node": "node", ++ } ++ ) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.WARNING, ++ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ { ++ "node": "node", ++ "reason": "reason", ++ "command": "command", ++ } ++ ), ++ ( ++ severity.ERROR, ++ 
report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR, ++ { ++ "node": "node", ++ } ++ ) ++ ] ++ ) ++ + class IsCmanClusterTest(TestCase): + def template_test(self, is_cman, corosync_output, corosync_retval=0): + mock_runner = mock.MagicMock(spec_set=lib.CommandRunner) +diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py +index 6af47d7..cff88eb 100644 +--- a/pcs/test/test_lib_nodes_task.py ++++ b/pcs/test/test_lib_nodes_task.py +@@ -27,14 +27,6 @@ class DistributeCorosyncConfTest(TestCase): + self.mock_reporter = MockLibraryReportProcessor() + self.mock_communicator = "mock node communicator" + +- def assert_set_remote_corosync_conf_call(self, a_call, node_ring0, config): +- self.assertEqual("set_remote_corosync_conf", a_call[0]) +- self.assertEqual(3, len(a_call[1])) +- self.assertEqual(self.mock_communicator, a_call[1][0]) +- self.assertEqual(node_ring0, a_call[1][1].ring0) +- self.assertEqual(config, a_call[1][2]) +- self.assertEqual(0, len(a_call[2])) +- + @mock.patch("pcs.lib.nodes_task.corosync_live") + def test_success(self, mock_corosync_live): + conf_text = "test conf text" +@@ -53,21 +45,19 @@ class DistributeCorosyncConfTest(TestCase): + + corosync_live_calls = [ + mock.call.set_remote_corosync_conf( +- "mock node communicator", nodes[0], conf_text ++ "mock node communicator", node_addrs_list[0], conf_text + ), + mock.call.set_remote_corosync_conf( +- "mock node communicator", nodes[1], conf_text ++ "mock node communicator", node_addrs_list[1], conf_text + ), + ] + self.assertEqual( + len(corosync_live_calls), + len(mock_corosync_live.mock_calls) + ) +- self.assert_set_remote_corosync_conf_call( +- mock_corosync_live.mock_calls[0], nodes[0], conf_text +- ) +- self.assert_set_remote_corosync_conf_call( +- mock_corosync_live.mock_calls[1], nodes[1], conf_text ++ mock_corosync_live.set_remote_corosync_conf.assert_has_calls( ++ corosync_live_calls, ++ any_order=True + ) + + assert_report_item_list_equal( +@@ -145,12 +135,10 @@ class 
DistributeCorosyncConfTest(TestCase): + len(corosync_live_calls), + len(mock_corosync_live.mock_calls) + ) +- self.assert_set_remote_corosync_conf_call( +- mock_corosync_live.mock_calls[0], nodes[0], conf_text +- ) +- self.assert_set_remote_corosync_conf_call( +- mock_corosync_live.mock_calls[1], nodes[1], conf_text +- ) ++ mock_corosync_live.set_remote_corosync_conf.assert_has_calls([ ++ mock.call("mock node communicator", node_addrs_list[0], conf_text), ++ mock.call("mock node communicator", node_addrs_list[1], conf_text), ++ ], any_order=True) + + assert_report_item_list_equal( + self.mock_reporter.report_item_list, +@@ -221,12 +209,10 @@ class DistributeCorosyncConfTest(TestCase): + len(corosync_live_calls), + len(mock_corosync_live.mock_calls) + ) +- self.assert_set_remote_corosync_conf_call( +- mock_corosync_live.mock_calls[0], nodes[0], conf_text +- ) +- self.assert_set_remote_corosync_conf_call( +- mock_corosync_live.mock_calls[1], nodes[1], conf_text +- ) ++ mock_corosync_live.set_remote_corosync_conf.assert_has_calls([ ++ mock.call("mock node communicator", node_addrs_list[0], conf_text), ++ mock.call("mock node communicator", node_addrs_list[1], conf_text), ++ ], any_order=True) + + assert_report_item_list_equal( + self.mock_reporter.report_item_list, +@@ -452,6 +438,134 @@ class CheckCorosyncOfflineTest(TestCase): + ) + + ++@mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_stop") ++@mock.patch("pcs.lib.nodes_task.qdevice_client.remote_client_start") ++class QdeviceReloadOnNodesTest(TestCase): ++ def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ ++ def test_success(self, mock_remote_start, mock_remote_stop): ++ nodes = ["node1", "node2"] ++ node_addrs_list = NodeAddressesList( ++ [NodeAddresses(addr) for addr in nodes] ++ ) ++ ++ lib.qdevice_reload_on_nodes( ++ self.mock_communicator, ++ self.mock_reporter, ++ node_addrs_list ++ ) ++ ++ node_calls = [ 
++ mock.call( ++ self.mock_reporter, self.mock_communicator, node_addrs_list[0] ++ ), ++ mock.call( ++ self.mock_reporter, self.mock_communicator, node_addrs_list[1] ++ ), ++ ] ++ self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls)) ++ self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls)) ++ mock_remote_stop.assert_has_calls(node_calls, any_order=True) ++ mock_remote_start.assert_has_calls(node_calls, any_order=True) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_CLIENT_RELOAD_STARTED, ++ {} ++ ), ++ ] ++ ) ++ ++ def test_fail_doesnt_prevent_start( ++ self, mock_remote_start, mock_remote_stop ++ ): ++ nodes = ["node1", "node2"] ++ node_addrs_list = NodeAddressesList( ++ [NodeAddresses(addr) for addr in nodes] ++ ) ++ def raiser(reporter, communicator, node): ++ if node.ring0 == nodes[1]: ++ raise NodeAuthenticationException( ++ node.label, "command", "HTTP error: 401" ++ ) ++ mock_remote_stop.side_effect = raiser ++ ++ assert_raise_library_error( ++ lambda: lib.qdevice_reload_on_nodes( ++ self.mock_communicator, ++ self.mock_reporter, ++ node_addrs_list ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED, ++ { ++ "node": nodes[1], ++ "command": "command", ++ "reason" : "HTTP error: 401", ++ }, ++ report_codes.SKIP_OFFLINE_NODES ++ ) ++ ) ++ ++ node_calls = [ ++ mock.call( ++ self.mock_reporter, self.mock_communicator, node_addrs_list[0] ++ ), ++ mock.call( ++ self.mock_reporter, self.mock_communicator, node_addrs_list[1] ++ ), ++ ] ++ self.assertEqual(len(node_calls), len(mock_remote_stop.mock_calls)) ++ self.assertEqual(len(node_calls), len(mock_remote_start.mock_calls)) ++ mock_remote_stop.assert_has_calls(node_calls, any_order=True) ++ mock_remote_start.assert_has_calls(node_calls, any_order=True) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ 
report_codes.QDEVICE_CLIENT_RELOAD_STARTED, ++ {} ++ ), ++ # why the same error twice? ++ # 1. Tested piece of code calls a function which puts an error ++ # into the reporter. The reporter raises an exception. The ++ # exception is caught in the tested piece of code, stored, and ++ # later put to reporter again. ++ # 2. Mock reporter remembers everything that goes through it ++ # and by the machanism described in 1 the error goes througt it ++ # twice. ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED, ++ { ++ "node": nodes[1], ++ "command": "command", ++ "reason" : "HTTP error: 401", ++ }, ++ report_codes.SKIP_OFFLINE_NODES ++ ), ++ ( ++ severity.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED, ++ { ++ "node": nodes[1], ++ "command": "command", ++ "reason" : "HTTP error: 401", ++ }, ++ report_codes.SKIP_OFFLINE_NODES ++ ), ++ ] ++ ) ++ ++ + class NodeCheckAuthTest(TestCase): + def test_success(self): + mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) +diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py +index 8167ad9..86de4c6 100644 +--- a/pcs/test/test_quorum.py ++++ b/pcs/test/test_quorum.py +@@ -144,7 +144,7 @@ class DeviceAddTest(TestBase): + + def test_success_model_only(self): + self.assert_pcs_success( +- "quorum device add model net host=127.0.0.1 algorithm=ffsplit" ++ "quorum device add model net host=127.0.0.1 algorithm=lms" + ) + self.assert_pcs_success( + "quorum config", +@@ -152,7 +152,7 @@ class DeviceAddTest(TestBase): + Options: + Device: + Model: net +- algorithm: ffsplit ++ algorithm: lms + host: 127.0.0.1 + """ + ) +@@ -167,6 +167,7 @@ Device: + Options: + Device: + timeout: 12345 ++ votes: 1 + Model: net + algorithm: ffsplit + host: 127.0.0.1 +@@ -193,7 +194,7 @@ Error: required option 'host' is missing + self.assert_pcs_fail( + "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d", + """\ +-Error: 'x' is not a valid algorithm value, use 2nodelms, 
ffsplit, lms, use --force to override ++Error: 'x' is not a valid algorithm value, use ffsplit, lms, use --force to override + Error: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker, use --force to override + Error: invalid quorum device option 'a', allowed options are: sync_timeout, timeout, use --force to override + Error: '-1' is not a valid timeout value, use positive integer, use --force to override +@@ -203,7 +204,7 @@ Error: '-1' is not a valid timeout value, use positive integer, use --force to o + self.assert_pcs_success( + "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d --force", + """\ +-Warning: 'x' is not a valid algorithm value, use 2nodelms, ffsplit, lms ++Warning: 'x' is not a valid algorithm value, use ffsplit, lms + Warning: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker + Warning: invalid quorum device option 'a', allowed options are: sync_timeout, timeout + Warning: '-1' is not a valid timeout value, use positive integer +diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py +index c61a2b8..819f8ee 100644 +--- a/pcs/test/test_utils.py ++++ b/pcs/test/test_utils.py +@@ -967,1359 +967,1607 @@ class UtilsTest(unittest.TestCase): + } + ) + +- def test_parse_cman_quorum_info(self): +- parsed = utils.parse_cman_quorum_info("""\ +-Version: 6.2.0 +-Config Version: 23 +-Cluster Name: cluster66 +-Cluster Id: 22265 +-Cluster Member: Yes +-Cluster Generation: 3612 +-Membership state: Cluster-Member +-Nodes: 3 +-Expected votes: 3 +-Total votes: 3 +-Node votes: 1 +-Quorum: 2 +-Active subsystems: 8 +-Flags: +-Ports Bound: 0 +-Node name: rh66-node2 +-Node ID: 2 +-Multicast addresses: 239.192.86.80 +-Node addresses: 192.168.122.61 +----Votes--- +-1 M 3 rh66-node1 +-2 M 2 rh66-node2 +-3 M 1 rh66-node3 +-""") +- self.assertEqual(True, parsed["quorate"]) +- 
self.assertEqual(2, parsed["quorum"]) ++ def test_get_operations_from_transitions(self): ++ transitions = utils.parse(rc("transitions01.xml")) + self.assertEqual( + [ +- {"name": "rh66-node1", "votes": 3, "local": False}, +- {"name": "rh66-node2", "votes": 2, "local": True}, +- {"name": "rh66-node3", "votes": 1, "local": False}, ++ { ++ 'id': 'dummy', ++ 'long_id': 'dummy', ++ 'operation': 'stop', ++ 'on_node': 'rh7-3', ++ }, ++ { ++ 'id': 'dummy', ++ 'long_id': 'dummy', ++ 'operation': 'start', ++ 'on_node': 'rh7-2', ++ }, ++ { ++ 'id': 'd0', ++ 'long_id': 'd0:1', ++ 'operation': 'stop', ++ 'on_node': 'rh7-1', ++ }, ++ { ++ 'id': 'd0', ++ 'long_id': 'd0:1', ++ 'operation': 'start', ++ 'on_node': 'rh7-2', ++ }, ++ { ++ 'id': 'state', ++ 'long_id': 'state:0', ++ 'operation': 'stop', ++ 'on_node': 'rh7-3', ++ }, ++ { ++ 'id': 'state', ++ 'long_id': 'state:0', ++ 'operation': 'start', ++ 'on_node': 'rh7-2', ++ }, + ], +- parsed["node_list"] ++ utils.get_operations_from_transitions(transitions) + ) + +- parsed = utils.parse_cman_quorum_info("""\ +-Version: 6.2.0 +-Config Version: 23 +-Cluster Name: cluster66 +-Cluster Id: 22265 +-Cluster Member: Yes +-Cluster Generation: 3612 +-Membership state: Cluster-Member +-Nodes: 3 +-Expected votes: 3 +-Total votes: 3 +-Node votes: 1 +-Quorum: 2 Activity blocked +-Active subsystems: 8 +-Flags: +-Ports Bound: 0 +-Node name: rh66-node1 +-Node ID: 1 +-Multicast addresses: 239.192.86.80 +-Node addresses: 192.168.122.61 +----Votes--- +-1 M 3 rh66-node1 +-2 X 2 rh66-node2 +-3 X 1 rh66-node3 +-""") +- self.assertEqual(False, parsed["quorate"]) +- self.assertEqual(2, parsed["quorum"]) ++ transitions = utils.parse(rc("transitions02.xml")) + self.assertEqual( + [ +- {"name": "rh66-node1", "votes": 3, "local": True}, ++ { ++ "id": "RemoteNode", ++ "long_id": "RemoteNode", ++ "operation": "stop", ++ "on_node": "virt-143", ++ }, ++ { ++ "id": "RemoteNode", ++ "long_id": "RemoteNode", ++ "operation": "migrate_to", ++ "on_node": "virt-143", ++ 
}, ++ { ++ "id": "RemoteNode", ++ "long_id": "RemoteNode", ++ "operation": "migrate_from", ++ "on_node": "virt-142", ++ }, ++ { ++ "id": "dummy8", ++ "long_id": "dummy8", ++ "operation": "stop", ++ "on_node": "virt-143", ++ }, ++ { ++ "id": "dummy8", ++ "long_id": "dummy8", ++ "operation": "start", ++ "on_node": "virt-142", ++ } + ], +- parsed["node_list"] ++ utils.get_operations_from_transitions(transitions) + ) + +- parsed = utils.parse_cman_quorum_info("") +- self.assertEqual(None, parsed) +- +- parsed = utils.parse_cman_quorum_info("""\ +-Version: 6.2.0 +-Config Version: 23 +-Cluster Name: cluster66 +-Cluster Id: 22265 +-Cluster Member: Yes +-Cluster Generation: 3612 +-Membership state: Cluster-Member +-Nodes: 3 +-Expected votes: 3 +-Total votes: 3 +-Node votes: 1 +-Quorum: +-Active subsystems: 8 +-Flags: +-Ports Bound: 0 +-Node name: rh66-node2 +-Node ID: 2 +-Multicast addresses: 239.192.86.80 +-Node addresses: 192.168.122.61 +----Votes--- +-1 M 3 rh66-node1 +-2 M 2 rh66-node2 +-3 M 1 rh66-node3 +-""") +- self.assertEqual(None, parsed) +- +- parsed = utils.parse_cman_quorum_info("""\ +-Version: 6.2.0 +-Config Version: 23 +-Cluster Name: cluster66 +-Cluster Id: 22265 +-Cluster Member: Yes +-Cluster Generation: 3612 +-Membership state: Cluster-Member +-Nodes: 3 +-Expected votes: 3 +-Total votes: 3 +-Node votes: 1 +-Quorum: Foo +-Active subsystems: 8 +-Flags: +-Ports Bound: 0 +-Node name: rh66-node2 +-Node ID: 2 +-Multicast addresses: 239.192.86.80 +-Node addresses: 192.168.122.61 +----Votes--- +-1 M 3 rh66-node1 +-2 M 2 rh66-node2 +-3 M 1 rh66-node3 +-""") +- self.assertEqual(None, parsed) +- +- parsed = utils.parse_cman_quorum_info("""\ +-Version: 6.2.0 +-Config Version: 23 +-Cluster Name: cluster66 +-Cluster Id: 22265 +-Cluster Member: Yes +-Cluster Generation: 3612 +-Membership state: Cluster-Member +-Nodes: 3 +-Expected votes: 3 +-Total votes: 3 +-Node votes: 1 +-Quorum: 4 +-Active subsystems: 8 +-Flags: +-Ports Bound: 0 +-Node name: rh66-node2 +-Node ID: 2 
+-Multicast addresses: 239.192.86.80 +-Node addresses: 192.168.122.61 +----Votes--- +-1 M 3 rh66-node1 +-2 M Foo rh66-node2 +-3 M 1 rh66-node3 +-""") +- self.assertEqual(None, parsed) +- +- def test_parse_quorumtool_output(self): +- parsed = utils.parse_quorumtool_output("""\ +-Quorum information +------------------- +-Date: Fri Jan 16 13:03:28 2015 +-Quorum provider: corosync_votequorum +-Nodes: 3 +-Node ID: 1 +-Ring ID: 19860 +-Quorate: Yes +- +-Votequorum information +----------------------- +-Expected votes: 3 +-Highest expected: 3 +-Total votes: 3 +-Quorum: 2 +-Flags: Quorate ++ def test_get_resources_location_from_operations(self): ++ cib_dom = self.get_cib_resources() + +-Membership information +----------------------- +- Nodeid Votes Qdevice Name +- 1 3 NR rh70-node1 +- 2 2 NR rh70-node2 (local) +- 3 1 NR rh70-node3 +-""") +- self.assertEqual(True, parsed["quorate"]) +- self.assertEqual(2, parsed["quorum"]) ++ operations = [] + self.assertEqual( +- [ +- {"name": "rh70-node1", "votes": 3, "local": False}, +- {"name": "rh70-node2", "votes": 2, "local": True}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- parsed["node_list"] ++ {}, ++ utils.get_resources_location_from_operations(cib_dom, operations) + ) + +- parsed = utils.parse_quorumtool_output("""\ +-Quorum information +------------------- +-Date: Fri Jan 16 13:03:35 2015 +-Quorum provider: corosync_votequorum +-Nodes: 1 +-Node ID: 1 +-Ring ID: 19868 +-Quorate: No +- +-Votequorum information +----------------------- +-Expected votes: 3 +-Highest expected: 3 +-Total votes: 1 +-Quorum: 2 Activity blocked +-Flags: ++ operations = [ ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "start", ++ "on_node": "rh7-1", ++ }, ++ ] ++ self.assertEqual( ++ { ++ 'myResource': { ++ 'id': 'myResource', ++ 'id_for_constraint': 'myResource', ++ 'long_id': 'myResource', ++ 'start_on_node': 'rh7-1', ++ }, ++ }, ++ utils.get_resources_location_from_operations(cib_dom, operations) ++ ) + 
+-Membership information +----------------------- +- Nodeid Votes Qdevice Name +- 1 1 NR rh70-node1 (local) +-""") +- self.assertEqual(False, parsed["quorate"]) +- self.assertEqual(2, parsed["quorum"]) ++ operations = [ ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "start", ++ "on_node": "rh7-1", ++ }, ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "start", ++ "on_node": "rh7-2", ++ }, ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "monitor", ++ "on_node": "rh7-3", ++ }, ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "stop", ++ "on_node": "rh7-3", ++ }, ++ ] + self.assertEqual( +- [ +- {"name": "rh70-node1", "votes": 1, "local": True}, +- ], +- parsed["node_list"] ++ { ++ 'myResource': { ++ 'id': 'myResource', ++ 'id_for_constraint': 'myResource', ++ 'long_id': 'myResource', ++ 'start_on_node': 'rh7-2', ++ }, ++ }, ++ utils.get_resources_location_from_operations(cib_dom, operations) + ) + +- parsed = utils.parse_quorumtool_output("") +- self.assertEqual(None, parsed) ++ operations = [ ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "start", ++ "on_node": "rh7-1", ++ }, ++ { ++ "id": "myClonedResource", ++ "long_id": "myClonedResource:0", ++ "operation": "start", ++ "on_node": "rh7-1", ++ }, ++ { ++ "id": "myClonedResource", ++ "long_id": "myClonedResource:0", ++ "operation": "start", ++ "on_node": "rh7-2", ++ }, ++ { ++ "id": "myClonedResource", ++ "long_id": "myClonedResource:1", ++ "operation": "start", ++ "on_node": "rh7-3", ++ }, ++ ] ++ self.assertEqual( ++ { ++ 'myResource': { ++ 'id': 'myResource', ++ 'id_for_constraint': 'myResource', ++ 'long_id': 'myResource', ++ 'start_on_node': 'rh7-1', ++ }, ++ 'myClonedResource:0': { ++ 'id': 'myClonedResource', ++ 'id_for_constraint': 'myClone', ++ 'long_id': 'myClonedResource:0', ++ 'start_on_node': 'rh7-2', ++ }, ++ 'myClonedResource:1': { ++ 'id': 'myClonedResource', ++ 'id_for_constraint': 
'myClone', ++ 'long_id': 'myClonedResource:1', ++ 'start_on_node': 'rh7-3', ++ }, ++ }, ++ utils.get_resources_location_from_operations(cib_dom, operations) ++ ) + +- parsed = utils.parse_quorumtool_output("""\ +-Quorum information +------------------- +-Date: Fri Jan 16 13:03:28 2015 +-Quorum provider: corosync_votequorum +-Nodes: 3 +-Node ID: 1 +-Ring ID: 19860 +-Quorate: Yes ++ operations = [ ++ { ++ "id": "myUniqueClonedResource:0", ++ "long_id": "myUniqueClonedResource:0", ++ "operation": "start", ++ "on_node": "rh7-1", ++ }, ++ { ++ "id": "myUniqueClonedResource:1", ++ "long_id": "myUniqueClonedResource:1", ++ "operation": "monitor", ++ "on_node": "rh7-2", ++ }, ++ { ++ "id": "myUniqueClonedResource:2", ++ "long_id": "myUniqueClonedResource:2", ++ "operation": "start", ++ "on_node": "rh7-3", ++ }, ++ ] ++ self.assertEqual( ++ { ++ 'myUniqueClonedResource:0': { ++ 'id': 'myUniqueClonedResource:0', ++ 'id_for_constraint': 'myUniqueClone', ++ 'long_id': 'myUniqueClonedResource:0', ++ 'start_on_node': 'rh7-1', ++ }, ++ 'myUniqueClonedResource:2': { ++ 'id': 'myUniqueClonedResource:2', ++ 'id_for_constraint': 'myUniqueClone', ++ 'long_id': 'myUniqueClonedResource:2', ++ 'start_on_node': 'rh7-3', ++ }, ++ }, ++ utils.get_resources_location_from_operations(cib_dom, operations) ++ ) + +-Votequorum information +----------------------- +-Expected votes: 3 +-Highest expected: 3 +-Total votes: 3 +-Quorum: +-Flags: Quorate ++ operations = [ ++ { ++ "id": "myMasteredGroupedResource", ++ "long_id": "myMasteredGroupedResource:0", ++ "operation": "start", ++ "on_node": "rh7-1", ++ }, ++ { ++ "id": "myMasteredGroupedResource", ++ "long_id": "myMasteredGroupedResource:1", ++ "operation": "demote", ++ "on_node": "rh7-2", ++ }, ++ { ++ "id": "myMasteredGroupedResource", ++ "long_id": "myMasteredGroupedResource:1", ++ "operation": "promote", ++ "on_node": "rh7-3", ++ }, ++ ] ++ self.assertEqual( ++ { ++ 'myMasteredGroupedResource:0': { ++ 'id': 'myMasteredGroupedResource', ++ 
'id_for_constraint': 'myGroupMaster', ++ 'long_id': 'myMasteredGroupedResource:0', ++ 'start_on_node': 'rh7-1', ++ }, ++ 'myMasteredGroupedResource:1': { ++ 'id': 'myMasteredGroupedResource', ++ 'id_for_constraint': 'myGroupMaster', ++ 'long_id': 'myMasteredGroupedResource:1', ++ 'promote_on_node': 'rh7-3', ++ }, ++ }, ++ utils.get_resources_location_from_operations(cib_dom, operations) ++ ) + +-Membership information +----------------------- +- Nodeid Votes Qdevice Name +- 1 1 NR rh70-node1 (local) +- 2 1 NR rh70-node2 +- 3 1 NR rh70-node3 +-""") +- self.assertEqual(None, parsed) ++ operations = [ ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "stop", ++ "on_node": "rh7-1", ++ }, ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "migrate_to", ++ "on_node": "rh7-1", ++ }, ++ { ++ "id": "myResource", ++ "long_id": "myResource", ++ "operation": "migrate_from", ++ "on_node": "rh7-2", ++ }, ++ ] ++ self.assertEqual( ++ { ++ "myResource": { ++ "id": "myResource", ++ "id_for_constraint": "myResource", ++ "long_id": "myResource", ++ "start_on_node": "rh7-2", ++ }, ++ }, ++ utils.get_resources_location_from_operations(cib_dom, operations) ++ ) + +- parsed = utils.parse_quorumtool_output("""\ +-Quorum information +------------------- +-Date: Fri Jan 16 13:03:28 2015 +-Quorum provider: corosync_votequorum +-Nodes: 3 +-Node ID: 1 +-Ring ID: 19860 +-Quorate: Yes ++ def test_is_int(self): ++ self.assertTrue(utils.is_int("-999")) ++ self.assertTrue(utils.is_int("-1")) ++ self.assertTrue(utils.is_int("0")) ++ self.assertTrue(utils.is_int("1")) ++ self.assertTrue(utils.is_int("99999")) ++ self.assertTrue(utils.is_int(" 99999 ")) ++ self.assertFalse(utils.is_int("0.0")) ++ self.assertFalse(utils.is_int("-1.0")) ++ self.assertFalse(utils.is_int("-0.1")) ++ self.assertFalse(utils.is_int("0.001")) ++ self.assertFalse(utils.is_int("-999999.1")) ++ self.assertFalse(utils.is_int("0.0001")) ++ self.assertFalse(utils.is_int("")) ++ 
self.assertFalse(utils.is_int(" ")) ++ self.assertFalse(utils.is_int("A")) ++ self.assertFalse(utils.is_int("random 15 47 text ")) + +-Votequorum information +----------------------- +-Expected votes: 3 +-Highest expected: 3 +-Total votes: 3 +-Quorum: Foo +-Flags: Quorate ++ def test_dom_get_node(self): ++ cib = self.get_cib_with_nodes_minidom() ++ #assertIsNone is not supported in python 2.6 ++ self.assertTrue(utils.dom_get_node(cib, "non-existing-node") is None) ++ node = utils.dom_get_node(cib, "rh7-1") ++ self.assertEqual(node.getAttribute("uname"), "rh7-1") ++ self.assertEqual(node.getAttribute("id"), "1") + +-Membership information +----------------------- +- Nodeid Votes Qdevice Name +- 1 1 NR rh70-node1 (local) +- 2 1 NR rh70-node2 +- 3 1 NR rh70-node3 +-""") +- self.assertEqual(None, parsed) ++ def test_dom_prepare_child_element(self): ++ cib = self.get_cib_with_nodes_minidom() ++ node = cib.getElementsByTagName("node")[0] ++ self.assertEqual(len(dom_get_child_elements(node)), 0) ++ child = utils.dom_prepare_child_element( ++ node, "utilization", "rh7-1-utilization" ++ ) ++ self.assertEqual(len(dom_get_child_elements(node)), 1) ++ self.assertEqual(child, dom_get_child_elements(node)[0]) ++ self.assertEqual(dom_get_child_elements(node)[0].tagName, "utilization") ++ self.assertEqual( ++ dom_get_child_elements(node)[0].getAttribute("id"), ++ "rh7-1-utilization" ++ ) ++ child2 = utils.dom_prepare_child_element( ++ node, "utilization", "rh7-1-utilization" ++ ) ++ self.assertEqual(len(dom_get_child_elements(node)), 1) ++ self.assertEqual(child, child2) + +- parsed = utils.parse_quorumtool_output("""\ +-Quorum information +------------------- +-Date: Fri Jan 16 13:03:28 2015 +-Quorum provider: corosync_votequorum +-Nodes: 3 +-Node ID: 1 +-Ring ID: 19860 +-Quorate: Yes ++ def test_dom_update_nv_pair_add(self): ++ nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement ++ utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-") ++ 
self.assertEqual(len(dom_get_child_elements(nv_set)), 1) ++ pair = dom_get_child_elements(nv_set)[0] ++ self.assertEqual(pair.getAttribute("name"), "test_name") ++ self.assertEqual(pair.getAttribute("value"), "test_val") ++ self.assertEqual(pair.getAttribute("id"), "prefix-test_name") ++ utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-") ++ self.assertEqual(len(dom_get_child_elements(nv_set)), 2) ++ self.assertEqual(pair, dom_get_child_elements(nv_set)[0]) ++ pair = dom_get_child_elements(nv_set)[1] ++ self.assertEqual(pair.getAttribute("name"), "another_name") ++ self.assertEqual(pair.getAttribute("value"), "value") ++ self.assertEqual(pair.getAttribute("id"), "prefix2-another_name") + +-Votequorum information +----------------------- +-Expected votes: 3 +-Highest expected: 3 +-Total votes: 3 +-Quorum: 2 +-Flags: Quorate ++ def test_dom_update_nv_pair_update(self): ++ nv_set = xml.dom.minidom.parseString(""" ++ <nv_set> ++ <nvpair id="prefix-test_name" name="test_name" value="test_val"/> ++ <nvpair id="prefix2-another_name" name="another_name" value="value"/> ++ </nv_set> ++ """).documentElement ++ utils.dom_update_nv_pair(nv_set, "test_name", "new_value") ++ self.assertEqual(len(dom_get_child_elements(nv_set)), 2) ++ pair1 = dom_get_child_elements(nv_set)[0] ++ pair2 = dom_get_child_elements(nv_set)[1] ++ self.assertEqual(pair1.getAttribute("name"), "test_name") ++ self.assertEqual(pair1.getAttribute("value"), "new_value") ++ self.assertEqual(pair1.getAttribute("id"), "prefix-test_name") ++ self.assertEqual(pair2.getAttribute("name"), "another_name") ++ self.assertEqual(pair2.getAttribute("value"), "value") ++ self.assertEqual(pair2.getAttribute("id"), "prefix2-another_name") + +-Membership information +----------------------- +- Nodeid Votes Qdevice Name +- 1 1 NR rh70-node1 (local) +- 2 foo NR rh70-node2 +- 3 1 NR rh70-node3 +-""") +- self.assertEqual(None, parsed) ++ def test_dom_update_nv_pair_remove(self): ++ nv_set = 
xml.dom.minidom.parseString(""" ++ <nv_set> ++ <nvpair id="prefix-test_name" name="test_name" value="test_val"/> ++ <nvpair id="prefix2-another_name" name="another_name" value="value"/> ++ </nv_set> ++ """).documentElement ++ utils.dom_update_nv_pair(nv_set, "non_existing_name", "") ++ self.assertEqual(len(dom_get_child_elements(nv_set)), 2) ++ utils.dom_update_nv_pair(nv_set, "another_name", "") ++ self.assertEqual(len(dom_get_child_elements(nv_set)), 1) ++ pair = dom_get_child_elements(nv_set)[0] ++ self.assertEqual(pair.getAttribute("name"), "test_name") ++ self.assertEqual(pair.getAttribute("value"), "test_val") ++ self.assertEqual(pair.getAttribute("id"), "prefix-test_name") ++ utils.dom_update_nv_pair(nv_set, "test_name", "") ++ self.assertEqual(len(dom_get_child_elements(nv_set)), 0) + +- def test_is_node_stop_cause_quorum_loss(self): +- quorum_info = { +- "quorate": False, +- } ++ def test_convert_args_to_tuples(self): ++ out = utils.convert_args_to_tuples( ++ ["invalid_string", "key=value", "key2=val=ue", "k e y= v a l u e "] ++ ) + self.assertEqual( +- False, +- utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ out, ++ [("key", "value"), ("key2", "val=ue"), ("k e y", " v a l u e ")] + ) + +- quorum_info = { +- "quorate": True, +- "quorum": 1, +- "node_list": [ +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } +- self.assertEqual( +- False, +- utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ def test_dom_update_utilization_invalid(self): ++ #commands writes to stderr ++ #we want clean test output, so we capture it ++ tmp_stderr = sys.stderr ++ sys.stderr = StringIO() ++ ++ el = xml.dom.minidom.parseString(""" ++ <resource id="test_id"/> ++ """).documentElement ++ self.assertRaises( ++ SystemExit, ++ utils.dom_update_utilization, el, [("name", "invalid_val")] + ) + +- quorum_info = { +- "quorate": True, +- "quorum": 1, +- "node_list": [ +- {"name": "rh70-node3", "votes": 1, "local": True}, +- ], +- } +- self.assertEqual( 
+- True, +- utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ self.assertRaises( ++ SystemExit, ++ utils.dom_update_utilization, el, [("name", "0.01")] + ) + +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 3, "local": False}, +- {"name": "rh70-node2", "votes": 2, "local": False}, +- {"name": "rh70-node3", "votes": 1, "local": True}, +- ], +- } +- self.assertEqual( +- False, +- utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ sys.stderr = tmp_stderr ++ ++ def test_dom_update_utilization_add(self): ++ el = xml.dom.minidom.parseString(""" ++ <resource id="test_id"/> ++ """).documentElement ++ utils.dom_update_utilization( ++ el, [("name", ""), ("key", "-1"), ("keys", "90")] + ) + +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 3, "local": False}, +- {"name": "rh70-node2", "votes": 2, "local": True}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } ++ self.assertEqual(len(dom_get_child_elements(el)), 1) ++ u = dom_get_child_elements(el)[0] ++ self.assertEqual(u.tagName, "utilization") ++ self.assertEqual(u.getAttribute("id"), "test_id-utilization") ++ self.assertEqual(len(dom_get_child_elements(u)), 2) ++ + self.assertEqual( +- False, +- utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ dom_get_child_elements(u)[0].getAttribute("id"), ++ "test_id-utilization-key" ++ ) ++ self.assertEqual( ++ dom_get_child_elements(u)[0].getAttribute("name"), ++ "key" ++ ) ++ self.assertEqual( ++ dom_get_child_elements(u)[0].getAttribute("value"), ++ "-1" + ) +- +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 3, "local": True}, +- {"name": "rh70-node2", "votes": 2, "local": False}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } + self.assertEqual( +- True, +- utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ 
dom_get_child_elements(u)[1].getAttribute("id"), ++ "test_id-utilization-keys" + ) +- +- +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 3, "local": True}, +- {"name": "rh70-node2", "votes": 2, "local": False}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } + self.assertEqual( +- False, +- utils.is_node_stop_cause_quorum_loss( +- quorum_info, False, ["rh70-node3"] +- ) ++ dom_get_child_elements(u)[1].getAttribute("name"), ++ "keys" + ) +- +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 3, "local": True}, +- {"name": "rh70-node2", "votes": 2, "local": False}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } + self.assertEqual( +- False, +- utils.is_node_stop_cause_quorum_loss( +- quorum_info, False, ["rh70-node2"] +- ) ++ dom_get_child_elements(u)[1].getAttribute("value"), ++ "90" + ) + +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 3, "local": True}, +- {"name": "rh70-node2", "votes": 2, "local": False}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } +- self.assertEqual( +- True, +- utils.is_node_stop_cause_quorum_loss( +- quorum_info, False, ["rh70-node1"] +- ) ++ def test_dom_update_utilization_update_remove(self): ++ el = xml.dom.minidom.parseString(""" ++ <resource id="test_id"> ++ <utilization id="test_id-utilization"> ++ <nvpair id="test_id-utilization-key" name="key" value="-1"/> ++ <nvpair id="test_id-utilization-keys" name="keys" value="90"/> ++ </utilization> ++ </resource> ++ """).documentElement ++ utils.dom_update_utilization( ++ el, [("key", "100"), ("keys", "")] + ) + +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 4, "local": True}, +- {"name": "rh70-node2", "votes": 1, "local": False}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } ++ u = 
dom_get_child_elements(el)[0] ++ self.assertEqual(len(dom_get_child_elements(u)), 1) + self.assertEqual( +- False, +- utils.is_node_stop_cause_quorum_loss( +- quorum_info, False, ["rh70-node2", "rh70-node3"] +- ) ++ dom_get_child_elements(u)[0].getAttribute("id"), ++ "test_id-utilization-key" + ) +- +- quorum_info = { +- "quorate": True, +- "quorum": 4, +- "node_list": [ +- {"name": "rh70-node1", "votes": 3, "local": True}, +- {"name": "rh70-node2", "votes": 2, "local": False}, +- {"name": "rh70-node3", "votes": 1, "local": False}, +- ], +- } + self.assertEqual( +- True, +- utils.is_node_stop_cause_quorum_loss( +- quorum_info, False, ["rh70-node2", "rh70-node3"] +- ) ++ dom_get_child_elements(u)[0].getAttribute("name"), ++ "key" + ) +- +- def test_get_operations_from_transitions(self): +- transitions = utils.parse(rc("transitions01.xml")) + self.assertEqual( +- [ +- { +- 'id': 'dummy', +- 'long_id': 'dummy', +- 'operation': 'stop', +- 'on_node': 'rh7-3', +- }, +- { +- 'id': 'dummy', +- 'long_id': 'dummy', +- 'operation': 'start', +- 'on_node': 'rh7-2', +- }, +- { +- 'id': 'd0', +- 'long_id': 'd0:1', +- 'operation': 'stop', +- 'on_node': 'rh7-1', +- }, +- { +- 'id': 'd0', +- 'long_id': 'd0:1', +- 'operation': 'start', +- 'on_node': 'rh7-2', +- }, +- { +- 'id': 'state', +- 'long_id': 'state:0', +- 'operation': 'stop', +- 'on_node': 'rh7-3', +- }, +- { +- 'id': 'state', +- 'long_id': 'state:0', +- 'operation': 'start', +- 'on_node': 'rh7-2', +- }, +- ], +- utils.get_operations_from_transitions(transitions) ++ dom_get_child_elements(u)[0].getAttribute("value"), ++ "100" + ) + +- transitions = utils.parse(rc("transitions02.xml")) +- self.assertEqual( +- [ +- { +- "id": "RemoteNode", +- "long_id": "RemoteNode", +- "operation": "stop", +- "on_node": "virt-143", +- }, +- { +- "id": "RemoteNode", +- "long_id": "RemoteNode", +- "operation": "migrate_to", +- "on_node": "virt-143", +- }, +- { +- "id": "RemoteNode", +- "long_id": "RemoteNode", +- "operation": "migrate_from", +- 
"on_node": "virt-142", +- }, +- { +- "id": "dummy8", +- "long_id": "dummy8", +- "operation": "stop", +- "on_node": "virt-143", +- }, +- { +- "id": "dummy8", +- "long_id": "dummy8", +- "operation": "start", +- "on_node": "virt-142", +- } +- ], +- utils.get_operations_from_transitions(transitions) ++ def test_dom_update_meta_attr_add(self): ++ el = xml.dom.minidom.parseString(""" ++ <resource id="test_id"/> ++ """).documentElement ++ utils.dom_update_meta_attr( ++ el, [("name", ""), ("key", "test"), ("key2", "val")] + ) + +- def test_get_resources_location_from_operations(self): +- cib_dom = self.get_cib_resources() ++ self.assertEqual(len(dom_get_child_elements(el)), 1) ++ u = dom_get_child_elements(el)[0] ++ self.assertEqual(u.tagName, "meta_attributes") ++ self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes") ++ self.assertEqual(len(dom_get_child_elements(u)), 2) + +- operations = [] + self.assertEqual( +- {}, +- utils.get_resources_location_from_operations(cib_dom, operations) ++ dom_get_child_elements(u)[0].getAttribute("id"), ++ "test_id-meta_attributes-key" + ) +- +- operations = [ +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "start", +- "on_node": "rh7-1", +- }, +- ] + self.assertEqual( +- { +- 'myResource': { +- 'id': 'myResource', +- 'id_for_constraint': 'myResource', +- 'long_id': 'myResource', +- 'start_on_node': 'rh7-1', +- }, +- }, +- utils.get_resources_location_from_operations(cib_dom, operations) ++ dom_get_child_elements(u)[0].getAttribute("name"), ++ "key" ++ ) ++ self.assertEqual( ++ dom_get_child_elements(u)[0].getAttribute("value"), ++ "test" + ) +- +- operations = [ +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "start", +- "on_node": "rh7-1", +- }, +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "start", +- "on_node": "rh7-2", +- }, +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "monitor", +- "on_node": "rh7-3", +- }, +- { +- "id": 
"myResource", +- "long_id": "myResource", +- "operation": "stop", +- "on_node": "rh7-3", +- }, +- ] + self.assertEqual( +- { +- 'myResource': { +- 'id': 'myResource', +- 'id_for_constraint': 'myResource', +- 'long_id': 'myResource', +- 'start_on_node': 'rh7-2', +- }, +- }, +- utils.get_resources_location_from_operations(cib_dom, operations) ++ dom_get_child_elements(u)[1].getAttribute("id"), ++ "test_id-meta_attributes-key2" + ) +- +- operations = [ +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "start", +- "on_node": "rh7-1", +- }, +- { +- "id": "myClonedResource", +- "long_id": "myClonedResource:0", +- "operation": "start", +- "on_node": "rh7-1", +- }, +- { +- "id": "myClonedResource", +- "long_id": "myClonedResource:0", +- "operation": "start", +- "on_node": "rh7-2", +- }, +- { +- "id": "myClonedResource", +- "long_id": "myClonedResource:1", +- "operation": "start", +- "on_node": "rh7-3", +- }, +- ] + self.assertEqual( +- { +- 'myResource': { +- 'id': 'myResource', +- 'id_for_constraint': 'myResource', +- 'long_id': 'myResource', +- 'start_on_node': 'rh7-1', +- }, +- 'myClonedResource:0': { +- 'id': 'myClonedResource', +- 'id_for_constraint': 'myClone', +- 'long_id': 'myClonedResource:0', +- 'start_on_node': 'rh7-2', +- }, +- 'myClonedResource:1': { +- 'id': 'myClonedResource', +- 'id_for_constraint': 'myClone', +- 'long_id': 'myClonedResource:1', +- 'start_on_node': 'rh7-3', +- }, +- }, +- utils.get_resources_location_from_operations(cib_dom, operations) ++ dom_get_child_elements(u)[1].getAttribute("name"), ++ "key2" + ) +- +- operations = [ +- { +- "id": "myUniqueClonedResource:0", +- "long_id": "myUniqueClonedResource:0", +- "operation": "start", +- "on_node": "rh7-1", +- }, +- { +- "id": "myUniqueClonedResource:1", +- "long_id": "myUniqueClonedResource:1", +- "operation": "monitor", +- "on_node": "rh7-2", +- }, +- { +- "id": "myUniqueClonedResource:2", +- "long_id": "myUniqueClonedResource:2", +- "operation": "start", +- "on_node": 
"rh7-3", +- }, +- ] + self.assertEqual( +- { +- 'myUniqueClonedResource:0': { +- 'id': 'myUniqueClonedResource:0', +- 'id_for_constraint': 'myUniqueClone', +- 'long_id': 'myUniqueClonedResource:0', +- 'start_on_node': 'rh7-1', +- }, +- 'myUniqueClonedResource:2': { +- 'id': 'myUniqueClonedResource:2', +- 'id_for_constraint': 'myUniqueClone', +- 'long_id': 'myUniqueClonedResource:2', +- 'start_on_node': 'rh7-3', +- }, +- }, +- utils.get_resources_location_from_operations(cib_dom, operations) ++ dom_get_child_elements(u)[1].getAttribute("value"), ++ "val" + ) + +- operations = [ +- { +- "id": "myMasteredGroupedResource", +- "long_id": "myMasteredGroupedResource:0", +- "operation": "start", +- "on_node": "rh7-1", +- }, +- { +- "id": "myMasteredGroupedResource", +- "long_id": "myMasteredGroupedResource:1", +- "operation": "demote", +- "on_node": "rh7-2", +- }, +- { +- "id": "myMasteredGroupedResource", +- "long_id": "myMasteredGroupedResource:1", +- "operation": "promote", +- "on_node": "rh7-3", +- }, +- ] ++ def test_dom_update_meta_attr_update_remove(self): ++ el = xml.dom.minidom.parseString(""" ++ <resource id="test_id"> ++ <meta_attributes id="test_id-utilization"> ++ <nvpair id="test_id-meta_attributes-key" name="key" value="test"/> ++ <nvpair id="test_id-meta_attributes-key2" name="key2" value="val"/> ++ </meta_attributes> ++ </resource> ++ """).documentElement ++ utils.dom_update_meta_attr( ++ el, [("key", "another_val"), ("key2", "")] ++ ) ++ ++ u = dom_get_child_elements(el)[0] ++ self.assertEqual(len(dom_get_child_elements(u)), 1) + self.assertEqual( +- { +- 'myMasteredGroupedResource:0': { +- 'id': 'myMasteredGroupedResource', +- 'id_for_constraint': 'myGroupMaster', +- 'long_id': 'myMasteredGroupedResource:0', +- 'start_on_node': 'rh7-1', +- }, +- 'myMasteredGroupedResource:1': { +- 'id': 'myMasteredGroupedResource', +- 'id_for_constraint': 'myGroupMaster', +- 'long_id': 'myMasteredGroupedResource:1', +- 'promote_on_node': 'rh7-3', +- }, +- }, +- 
utils.get_resources_location_from_operations(cib_dom, operations) ++ dom_get_child_elements(u)[0].getAttribute("id"), ++ "test_id-meta_attributes-key" ++ ) ++ self.assertEqual( ++ dom_get_child_elements(u)[0].getAttribute("name"), ++ "key" ++ ) ++ self.assertEqual( ++ dom_get_child_elements(u)[0].getAttribute("value"), ++ "another_val" + ) + +- operations = [ +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "stop", +- "on_node": "rh7-1", +- }, +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "migrate_to", +- "on_node": "rh7-1", +- }, +- { +- "id": "myResource", +- "long_id": "myResource", +- "operation": "migrate_from", +- "on_node": "rh7-2", ++ def test_get_utilization(self): ++ el = xml.dom.minidom.parseString(""" ++ <resource id="test_id"> ++ <utilization id="test_id-utilization"> ++ <nvpair id="test_id-utilization-key" name="key" value="-1"/> ++ <nvpair id="test_id-utilization-keys" name="keys" value="90"/> ++ </utilization> ++ </resource> ++ """).documentElement ++ self.assertEqual({"key": "-1", "keys": "90"}, utils.get_utilization(el)) ++ ++ def test_get_utilization_str(self): ++ el = xml.dom.minidom.parseString(""" ++ <resource id="test_id"> ++ <utilization id="test_id-utilization"> ++ <nvpair id="test_id-utilization-key" name="key" value="-1"/> ++ <nvpair id="test_id-utilization-keys" name="keys" value="90"/> ++ </utilization> ++ </resource> ++ """).documentElement ++ self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el)) ++ ++ def test_get_cluster_property_from_xml_enum(self): ++ el = ET.fromstring(""" ++ <parameter name="no-quorum-policy" unique="0"> ++ <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc> ++ <content type="enum" default="stop"/> ++ <longdesc lang="en">What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide</longdesc> ++ </parameter> ++ """) ++ expected = { ++ "name": "no-quorum-policy", ++ "shortdesc": "What to do 
when the cluster does not have quorum", ++ "longdesc": "", ++ "type": "enum", ++ "default": "stop", ++ "enum": ["stop", "freeze", "ignore", "suicide"] ++ } ++ self.assertEqual(expected, utils.get_cluster_property_from_xml(el)) ++ ++ def test_get_cluster_property_from_xml(self): ++ el = ET.fromstring(""" ++ <parameter name="default-resource-stickiness" unique="0"> ++ <shortdesc lang="en"></shortdesc> ++ <content type="integer" default="0"/> ++ <longdesc lang="en"></longdesc> ++ </parameter> ++ """) ++ expected = { ++ "name": "default-resource-stickiness", ++ "shortdesc": "", ++ "longdesc": "", ++ "type": "integer", ++ "default": "0" ++ } ++ self.assertEqual(expected, utils.get_cluster_property_from_xml(el)) ++ ++ def test_get_cluster_property_default(self): ++ definition = { ++ "default-resource-stickiness": { ++ "name": "default-resource-stickiness", ++ "shortdesc": "", ++ "longdesc": "", ++ "type": "integer", ++ "default": "0", ++ "source": "pengine" + }, +- ] +- self.assertEqual( +- { +- "myResource": { +- "id": "myResource", +- "id_for_constraint": "myResource", +- "long_id": "myResource", +- "start_on_node": "rh7-2", +- }, ++ "no-quorum-policy": { ++ "name": "no-quorum-policy", ++ "shortdesc": "What to do when the cluster does not have quorum", ++ "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide", ++ "type": "enum", ++ "default": "stop", ++ "enum": ["stop", "freeze", "ignore", "suicide"], ++ "source": "pengine" + }, +- utils.get_resources_location_from_operations(cib_dom, operations) ++ "enable-acl": { ++ "name": "enable-acl", ++ "shortdesc": "Enable CIB ACL", ++ "longdesc": "Enable CIB ACL", ++ "type": "boolean", ++ "default": "false", ++ "source": "cib" ++ } ++ } ++ self.assertEqual( ++ utils.get_cluster_property_default( ++ definition, "default-resource-stickiness" ++ ), ++ "0" + ) +- +- def test_is_int(self): +- self.assertTrue(utils.is_int("-999")) +- self.assertTrue(utils.is_int("-1")) +- 
self.assertTrue(utils.is_int("0")) +- self.assertTrue(utils.is_int("1")) +- self.assertTrue(utils.is_int("99999")) +- self.assertTrue(utils.is_int(" 99999 ")) +- self.assertFalse(utils.is_int("0.0")) +- self.assertFalse(utils.is_int("-1.0")) +- self.assertFalse(utils.is_int("-0.1")) +- self.assertFalse(utils.is_int("0.001")) +- self.assertFalse(utils.is_int("-999999.1")) +- self.assertFalse(utils.is_int("0.0001")) +- self.assertFalse(utils.is_int("")) +- self.assertFalse(utils.is_int(" ")) +- self.assertFalse(utils.is_int("A")) +- self.assertFalse(utils.is_int("random 15 47 text ")) +- +- def test_dom_get_node(self): +- cib = self.get_cib_with_nodes_minidom() +- #assertIsNone is not supported in python 2.6 +- self.assertTrue(utils.dom_get_node(cib, "non-existing-node") is None) +- node = utils.dom_get_node(cib, "rh7-1") +- self.assertEqual(node.getAttribute("uname"), "rh7-1") +- self.assertEqual(node.getAttribute("id"), "1") +- +- def test_dom_prepare_child_element(self): +- cib = self.get_cib_with_nodes_minidom() +- node = cib.getElementsByTagName("node")[0] +- self.assertEqual(len(dom_get_child_elements(node)), 0) +- child = utils.dom_prepare_child_element( +- node, "utilization", "rh7-1-utilization" ++ self.assertEqual( ++ utils.get_cluster_property_default(definition, "no-quorum-policy"), ++ "stop" + ) +- self.assertEqual(len(dom_get_child_elements(node)), 1) +- self.assertEqual(child, dom_get_child_elements(node)[0]) +- self.assertEqual(dom_get_child_elements(node)[0].tagName, "utilization") + self.assertEqual( +- dom_get_child_elements(node)[0].getAttribute("id"), +- "rh7-1-utilization" ++ utils.get_cluster_property_default(definition, "enable-acl"), ++ "false" + ) +- child2 = utils.dom_prepare_child_element( +- node, "utilization", "rh7-1-utilization" ++ self.assertRaises( ++ utils.UnknownPropertyException, ++ utils.get_cluster_property_default, definition, "non-existing" + ) +- self.assertEqual(len(dom_get_child_elements(node)), 1) +- 
self.assertEqual(child, child2) + +- def test_dom_update_nv_pair_add(self): +- nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement +- utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-") +- self.assertEqual(len(dom_get_child_elements(nv_set)), 1) +- pair = dom_get_child_elements(nv_set)[0] +- self.assertEqual(pair.getAttribute("name"), "test_name") +- self.assertEqual(pair.getAttribute("value"), "test_val") +- self.assertEqual(pair.getAttribute("id"), "prefix-test_name") +- utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-") +- self.assertEqual(len(dom_get_child_elements(nv_set)), 2) +- self.assertEqual(pair, dom_get_child_elements(nv_set)[0]) +- pair = dom_get_child_elements(nv_set)[1] +- self.assertEqual(pair.getAttribute("name"), "another_name") +- self.assertEqual(pair.getAttribute("value"), "value") +- self.assertEqual(pair.getAttribute("id"), "prefix2-another_name") ++ def test_is_valid_cib_value_unknown_type(self): ++ # should be always true ++ self.assertTrue(utils.is_valid_cib_value("unknown", "test")) ++ self.assertTrue(utils.is_valid_cib_value("string", "string value")) + +- def test_dom_update_nv_pair_update(self): +- nv_set = xml.dom.minidom.parseString(""" +- <nv_set> +- <nvpair id="prefix-test_name" name="test_name" value="test_val"/> +- <nvpair id="prefix2-another_name" name="another_name" value="value"/> +- </nv_set> +- """).documentElement +- utils.dom_update_nv_pair(nv_set, "test_name", "new_value") +- self.assertEqual(len(dom_get_child_elements(nv_set)), 2) +- pair1 = dom_get_child_elements(nv_set)[0] +- pair2 = dom_get_child_elements(nv_set)[1] +- self.assertEqual(pair1.getAttribute("name"), "test_name") +- self.assertEqual(pair1.getAttribute("value"), "new_value") +- self.assertEqual(pair1.getAttribute("id"), "prefix-test_name") +- self.assertEqual(pair2.getAttribute("name"), "another_name") +- self.assertEqual(pair2.getAttribute("value"), "value") +- self.assertEqual(pair2.getAttribute("id"), 
"prefix2-another_name") ++ def test_is_valid_cib_value_integer(self): ++ self.assertTrue(utils.is_valid_cib_value("integer", "0")) ++ self.assertTrue(utils.is_valid_cib_value("integer", "42")) ++ self.assertTrue(utils.is_valid_cib_value("integer", "-90")) ++ self.assertTrue(utils.is_valid_cib_value("integer", "+90")) ++ self.assertTrue(utils.is_valid_cib_value("integer", "INFINITY")) ++ self.assertTrue(utils.is_valid_cib_value("integer", "-INFINITY")) ++ self.assertTrue(utils.is_valid_cib_value("integer", "+INFINITY")) ++ self.assertFalse(utils.is_valid_cib_value("integer", "0.0")) ++ self.assertFalse(utils.is_valid_cib_value("integer", "-10.9")) ++ self.assertFalse(utils.is_valid_cib_value("integer", "string")) + +- def test_dom_update_nv_pair_remove(self): +- nv_set = xml.dom.minidom.parseString(""" +- <nv_set> +- <nvpair id="prefix-test_name" name="test_name" value="test_val"/> +- <nvpair id="prefix2-another_name" name="another_name" value="value"/> +- </nv_set> +- """).documentElement +- utils.dom_update_nv_pair(nv_set, "non_existing_name", "") +- self.assertEqual(len(dom_get_child_elements(nv_set)), 2) +- utils.dom_update_nv_pair(nv_set, "another_name", "") +- self.assertEqual(len(dom_get_child_elements(nv_set)), 1) +- pair = dom_get_child_elements(nv_set)[0] +- self.assertEqual(pair.getAttribute("name"), "test_name") +- self.assertEqual(pair.getAttribute("value"), "test_val") +- self.assertEqual(pair.getAttribute("id"), "prefix-test_name") +- utils.dom_update_nv_pair(nv_set, "test_name", "") +- self.assertEqual(len(dom_get_child_elements(nv_set)), 0) ++ def test_is_valid_cib_value_enum(self): ++ self.assertTrue( ++ utils.is_valid_cib_value("enum", "this", ["another", "this", "1"]) ++ ) ++ self.assertFalse( ++ utils.is_valid_cib_value("enum", "this", ["another", "this_not"]) ++ ) ++ self.assertFalse(utils.is_valid_cib_value("enum", "this", [])) ++ self.assertFalse(utils.is_valid_cib_value("enum", "this")) + +- def test_convert_args_to_tuples(self): +- out = 
utils.convert_args_to_tuples( +- ["invalid_string", "key=value", "key2=val=ue", "k e y= v a l u e "] ++ def test_is_valid_cib_value_boolean(self): ++ self.assertTrue(utils.is_valid_cib_value("boolean", "true")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "TrUe")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "TRUE")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "yes")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "on")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "y")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "Y")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "1")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "false")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "FaLse")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "FALSE")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "off")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "no")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "N")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "n")) ++ self.assertTrue(utils.is_valid_cib_value("boolean", "0")) ++ self.assertFalse(utils.is_valid_cib_value("boolean", "-1")) ++ self.assertFalse(utils.is_valid_cib_value("boolean", "not")) ++ self.assertFalse(utils.is_valid_cib_value("boolean", "random_string")) ++ self.assertFalse(utils.is_valid_cib_value("boolean", "truth")) ++ ++ def test_is_valid_cib_value_time(self): ++ self.assertTrue(utils.is_valid_cib_value("time", "10")) ++ self.assertTrue(utils.is_valid_cib_value("time", "0")) ++ self.assertTrue(utils.is_valid_cib_value("time", "9s")) ++ self.assertTrue(utils.is_valid_cib_value("time", "10sec")) ++ self.assertTrue(utils.is_valid_cib_value("time", "10min")) ++ self.assertTrue(utils.is_valid_cib_value("time", "10m")) ++ self.assertTrue(utils.is_valid_cib_value("time", "10h")) ++ self.assertTrue(utils.is_valid_cib_value("time", "10hr")) ++ self.assertFalse(utils.is_valid_cib_value("time", 
"5.2")) ++ self.assertFalse(utils.is_valid_cib_value("time", "-10")) ++ self.assertFalse(utils.is_valid_cib_value("time", "10m 2s")) ++ self.assertFalse(utils.is_valid_cib_value("time", "hour")) ++ self.assertFalse(utils.is_valid_cib_value("time", "day")) ++ ++ def test_validate_cluster_property(self): ++ definition = { ++ "default-resource-stickiness": { ++ "name": "default-resource-stickiness", ++ "shortdesc": "", ++ "longdesc": "", ++ "type": "integer", ++ "default": "0", ++ "source": "pengine" ++ }, ++ "no-quorum-policy": { ++ "name": "no-quorum-policy", ++ "shortdesc": "What to do when the cluster does not have quorum", ++ "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide", ++ "type": "enum", ++ "default": "stop", ++ "enum": ["stop", "freeze", "ignore", "suicide"], ++ "source": "pengine" ++ }, ++ "enable-acl": { ++ "name": "enable-acl", ++ "shortdesc": "Enable CIB ACL", ++ "longdesc": "Enable CIB ACL", ++ "type": "boolean", ++ "default": "false", ++ "source": "cib" ++ } ++ } ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "default-resource-stickiness", "10" ++ )) ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "default-resource-stickiness", "-1" ++ )) ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "no-quorum-policy", "freeze" ++ )) ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "no-quorum-policy", "suicide" ++ )) ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "enable-acl", "true" ++ )) ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "enable-acl", "false" ++ )) ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "enable-acl", "on" ++ )) ++ self.assertTrue(utils.is_valid_cluster_property( ++ definition, "enable-acl", "OFF" ++ )) ++ self.assertFalse(utils.is_valid_cluster_property( ++ definition, "default-resource-stickiness", "test" ++ )) ++ 
self.assertFalse(utils.is_valid_cluster_property( ++ definition, "default-resource-stickiness", "1.2" ++ )) ++ self.assertFalse(utils.is_valid_cluster_property( ++ definition, "no-quorum-policy", "invalid" ++ )) ++ self.assertFalse(utils.is_valid_cluster_property( ++ definition, "enable-acl", "not" ++ )) ++ self.assertRaises( ++ utils.UnknownPropertyException, ++ utils.is_valid_cluster_property, definition, "unknown", "value" + ) +- self.assertEqual( +- out, +- [("key", "value"), ("key2", "val=ue"), ("k e y", " v a l u e ")] ++ ++ def assert_element_id(self, node, node_id): ++ self.assertTrue( ++ isinstance(node, xml.dom.minidom.Element), ++ "element with id '%s' not found" % node_id + ) ++ self.assertEqual(node.getAttribute("id"), node_id) + +- def test_dom_update_utilization_invalid(self): +- #commands writes to stderr +- #we want clean test output, so we capture it +- tmp_stderr = sys.stderr +- sys.stderr = StringIO() + +- el = xml.dom.minidom.parseString(""" +- <resource id="test_id"/> +- """).documentElement +- self.assertRaises( +- SystemExit, +- utils.dom_update_utilization, el, [("name", "invalid_val")] +- ) ++class RunParallelTest(unittest.TestCase): ++ def fixture_create_worker(self, log, name, sleepSeconds=0): ++ def worker(): ++ sleep(sleepSeconds) ++ log.append(name) ++ return worker + +- self.assertRaises( +- SystemExit, +- utils.dom_update_utilization, el, [("name", "0.01")] ++ def test_run_all_workers(self): ++ log = [] ++ utils.run_parallel( ++ [ ++ self.fixture_create_worker(log, 'first'), ++ self.fixture_create_worker(log, 'second'), ++ ], ++ wait_seconds=.1 + ) + +- sys.stderr = tmp_stderr ++ self.assertEqual(log, ['first', 'second']) + +- def test_dom_update_utilization_add(self): +- el = xml.dom.minidom.parseString(""" +- <resource id="test_id"/> +- """).documentElement +- utils.dom_update_utilization( +- el, [("name", ""), ("key", "-1"), ("keys", "90")] ++ def test_wait_for_slower_workers(self): ++ log = [] ++ utils.run_parallel( ++ [ ++ 
self.fixture_create_worker(log, 'first', .03), ++ self.fixture_create_worker(log, 'second'), ++ ], ++ wait_seconds=.01 + ) + +- self.assertEqual(len(dom_get_child_elements(el)), 1) +- u = dom_get_child_elements(el)[0] +- self.assertEqual(u.tagName, "utilization") +- self.assertEqual(u.getAttribute("id"), "test_id-utilization") +- self.assertEqual(len(dom_get_child_elements(u)), 2) ++ self.assertEqual(log, ['second', 'first']) ++ + ++class PrepareNodeNamesTest(unittest.TestCase): ++ def test_return_original_when_is_in_pacemaker_nodes(self): ++ node = 'test' + self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("id"), +- "test_id-utilization-key" ++ node, ++ utils.prepare_node_name(node, {1: node}, {}) + ) ++ ++ def test_return_original_when_is_not_in_corosync_nodes(self): ++ node = 'test' + self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("name"), +- "key" ++ node, ++ utils.prepare_node_name(node, {}, {}) + ) ++ ++ def test_return_original_when_corosync_id_not_in_pacemaker(self): ++ node = 'test' + self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("value"), +- "-1" ++ node, ++ utils.prepare_node_name(node, {}, {1: node}) + ) ++ ++ def test_return_modified_name(self): ++ node = 'test' + self.assertEqual( +- dom_get_child_elements(u)[1].getAttribute("id"), +- "test_id-utilization-keys" ++ 'another (test)', ++ utils.prepare_node_name(node, {1: 'another'}, {1: node}) + ) ++ ++ def test_return_modified_name_with_pm_null_case(self): ++ node = 'test' + self.assertEqual( +- dom_get_child_elements(u)[1].getAttribute("name"), +- "keys" ++ '*Unknown* (test)', ++ utils.prepare_node_name(node, {1: '(null)'}, {1: node}) ++ ) ++ ++ ++class NodeActionTaskTest(unittest.TestCase): ++ def test_can_run_action(self): ++ def action(node, arg, kwarg=None): ++ return (0, ':'.join([node, arg, kwarg])) ++ ++ report_list = [] ++ def report(node, returncode, output): ++ report_list.append('|'.join([node, str(returncode), output])) ++ ++ task = 
utils.create_task(report, action, 'node', 'arg', kwarg='kwarg') ++ task() ++ ++ self.assertEqual(['node|0|node:arg:kwarg'], report_list) ++ ++ ++class ParseCmanQuorumInfoTest(unittest.TestCase): ++ def test_error_empty_string(self): ++ parsed = utils.parse_cman_quorum_info("") ++ self.assertEqual(None, parsed) ++ ++ def test_quorate_no_qdevice(self): ++ parsed = utils.parse_cman_quorum_info("""\ ++Version: 6.2.0 ++Config Version: 23 ++Cluster Name: cluster66 ++Cluster Id: 22265 ++Cluster Member: Yes ++Cluster Generation: 3612 ++Membership state: Cluster-Member ++Nodes: 3 ++Expected votes: 3 ++Total votes: 3 ++Node votes: 1 ++Quorum: 2 ++Active subsystems: 8 ++Flags: ++Ports Bound: 0 ++Node name: rh66-node2 ++Node ID: 2 ++Multicast addresses: 239.192.86.80 ++Node addresses: 192.168.122.61 ++---Votes--- ++1 M 3 rh66-node1 ++2 M 2 rh66-node2 ++3 M 1 rh66-node3 ++""") ++ self.assertEqual(True, parsed["quorate"]) ++ self.assertEqual(2, parsed["quorum"]) ++ self.assertEqual( ++ [ ++ {"name": "rh66-node1", "votes": 3, "local": False}, ++ {"name": "rh66-node2", "votes": 2, "local": True}, ++ {"name": "rh66-node3", "votes": 1, "local": False}, ++ ], ++ parsed["node_list"] + ) ++ self.assertEqual([], parsed["qdevice_list"]) ++ ++ def test_no_quorate_no_qdevice(self): ++ parsed = utils.parse_cman_quorum_info("""\ ++Version: 6.2.0 ++Config Version: 23 ++Cluster Name: cluster66 ++Cluster Id: 22265 ++Cluster Member: Yes ++Cluster Generation: 3612 ++Membership state: Cluster-Member ++Nodes: 3 ++Expected votes: 3 ++Total votes: 3 ++Node votes: 1 ++Quorum: 2 Activity blocked ++Active subsystems: 8 ++Flags: ++Ports Bound: 0 ++Node name: rh66-node1 ++Node ID: 1 ++Multicast addresses: 239.192.86.80 ++Node addresses: 192.168.122.61 ++---Votes--- ++1 M 3 rh66-node1 ++2 X 2 rh66-node2 ++3 X 1 rh66-node3 ++""") ++ self.assertEqual(False, parsed["quorate"]) ++ self.assertEqual(2, parsed["quorum"]) + self.assertEqual( +- dom_get_child_elements(u)[1].getAttribute("value"), +- "90" ++ [ ++ 
{"name": "rh66-node1", "votes": 3, "local": True}, ++ ], ++ parsed["node_list"] + ) ++ self.assertEqual([], parsed["qdevice_list"]) ++ ++ def test_error_missing_quorum(self): ++ parsed = utils.parse_cman_quorum_info("""\ ++Version: 6.2.0 ++Config Version: 23 ++Cluster Name: cluster66 ++Cluster Id: 22265 ++Cluster Member: Yes ++Cluster Generation: 3612 ++Membership state: Cluster-Member ++Nodes: 3 ++Expected votes: 3 ++Total votes: 3 ++Node votes: 1 ++Quorum: ++Active subsystems: 8 ++Flags: ++Ports Bound: 0 ++Node name: rh66-node2 ++Node ID: 2 ++Multicast addresses: 239.192.86.80 ++Node addresses: 192.168.122.61 ++---Votes--- ++1 M 3 rh66-node1 ++2 M 2 rh66-node2 ++3 M 1 rh66-node3 ++""") ++ self.assertEqual(None, parsed) ++ ++ def test_error_quorum_garbage(self): ++ parsed = utils.parse_cman_quorum_info("""\ ++Version: 6.2.0 ++Config Version: 23 ++Cluster Name: cluster66 ++Cluster Id: 22265 ++Cluster Member: Yes ++Cluster Generation: 3612 ++Membership state: Cluster-Member ++Nodes: 3 ++Expected votes: 3 ++Total votes: 3 ++Node votes: 1 ++Quorum: Foo ++Active subsystems: 8 ++Flags: ++Ports Bound: 0 ++Node name: rh66-node2 ++Node ID: 2 ++Multicast addresses: 239.192.86.80 ++Node addresses: 192.168.122.61 ++---Votes--- ++1 M 3 rh66-node1 ++2 M 2 rh66-node2 ++3 M 1 rh66-node3 ++""") ++ self.assertEqual(None, parsed) ++ ++ def test_error_node_votes_garbage(self): ++ parsed = utils.parse_cman_quorum_info("""\ ++Version: 6.2.0 ++Config Version: 23 ++Cluster Name: cluster66 ++Cluster Id: 22265 ++Cluster Member: Yes ++Cluster Generation: 3612 ++Membership state: Cluster-Member ++Nodes: 3 ++Expected votes: 3 ++Total votes: 3 ++Node votes: 1 ++Quorum: 4 ++Active subsystems: 8 ++Flags: ++Ports Bound: 0 ++Node name: rh66-node2 ++Node ID: 2 ++Multicast addresses: 239.192.86.80 ++Node addresses: 192.168.122.61 ++---Votes--- ++1 M 3 rh66-node1 ++2 M Foo rh66-node2 ++3 M 1 rh66-node3 ++""") ++ self.assertEqual(None, parsed) + +- def test_dom_update_utilization_update_remove(self): 
+- el = xml.dom.minidom.parseString(""" +- <resource id="test_id"> +- <utilization id="test_id-utilization"> +- <nvpair id="test_id-utilization-key" name="key" value="-1"/> +- <nvpair id="test_id-utilization-keys" name="keys" value="90"/> +- </utilization> +- </resource> +- """).documentElement +- utils.dom_update_utilization( +- el, [("key", "100"), ("keys", "")] +- ) + +- u = dom_get_child_elements(el)[0] +- self.assertEqual(len(dom_get_child_elements(u)), 1) +- self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("id"), +- "test_id-utilization-key" +- ) +- self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("name"), +- "key" +- ) +- self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("value"), +- "100" +- ) ++class ParseQuorumtoolOutputTest(unittest.TestCase): ++ def test_error_empty_string(self): ++ parsed = utils.parse_quorumtool_output("") ++ self.assertEqual(None, parsed) + +- def test_dom_update_meta_attr_add(self): +- el = xml.dom.minidom.parseString(""" +- <resource id="test_id"/> +- """).documentElement +- utils.dom_update_meta_attr( +- el, [("name", ""), ("key", "test"), ("key2", "val")] +- ) ++ def test_quorate_no_qdevice(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:28 2015 ++Quorum provider: corosync_votequorum ++Nodes: 3 ++Node ID: 1 ++Ring ID: 19860 ++Quorate: Yes + +- self.assertEqual(len(dom_get_child_elements(el)), 1) +- u = dom_get_child_elements(el)[0] +- self.assertEqual(u.tagName, "meta_attributes") +- self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes") +- self.assertEqual(len(dom_get_child_elements(u)), 2) ++Votequorum information ++---------------------- ++Expected votes: 3 ++Highest expected: 3 ++Total votes: 3 ++Quorum: 2 ++Flags: Quorate + ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 3 NR rh70-node1 ++ 2 2 NR rh70-node2 (local) ++ 3 1 NR rh70-node3 ++""") ++ 
self.assertEqual(True, parsed["quorate"]) ++ self.assertEqual(2, parsed["quorum"]) + self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("id"), +- "test_id-meta_attributes-key" +- ) +- self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("name"), +- "key" +- ) +- self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("value"), +- "test" +- ) +- self.assertEqual( +- dom_get_child_elements(u)[1].getAttribute("id"), +- "test_id-meta_attributes-key2" ++ [ ++ {"name": "rh70-node1", "votes": 3, "local": False}, ++ {"name": "rh70-node2", "votes": 2, "local": True}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ parsed["node_list"] + ) ++ self.assertEqual([], parsed["qdevice_list"]) ++ ++ def test_quorate_with_qdevice(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:28 2015 ++Quorum provider: corosync_votequorum ++Nodes: 3 ++Node ID: 1 ++Ring ID: 19860 ++Quorate: Yes ++ ++Votequorum information ++---------------------- ++Expected votes: 10 ++Highest expected: 10 ++Total votes: 10 ++Quorum: 6 ++Flags: Quorate Qdevice ++ ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 3 A,V,MNW rh70-node1 ++ 2 2 A,V,MNW rh70-node2 (local) ++ 3 1 A,V,MNW rh70-node3 ++ 0 4 Qdevice ++""") ++ self.assertEqual(True, parsed["quorate"]) ++ self.assertEqual(6, parsed["quorum"]) + self.assertEqual( +- dom_get_child_elements(u)[1].getAttribute("name"), +- "key2" ++ [ ++ {"name": "rh70-node1", "votes": 3, "local": False}, ++ {"name": "rh70-node2", "votes": 2, "local": True}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ parsed["node_list"] + ) + self.assertEqual( +- dom_get_child_elements(u)[1].getAttribute("value"), +- "val" ++ [ ++ {"name": "Qdevice", "votes": 4, "local": False}, ++ ], ++ parsed["qdevice_list"] + ) + +- def test_dom_update_meta_attr_update_remove(self): +- el = xml.dom.minidom.parseString(""" +- <resource 
id="test_id"> +- <meta_attributes id="test_id-utilization"> +- <nvpair id="test_id-meta_attributes-key" name="key" value="test"/> +- <nvpair id="test_id-meta_attributes-key2" name="key2" value="val"/> +- </meta_attributes> +- </resource> +- """).documentElement +- utils.dom_update_meta_attr( +- el, [("key", "another_val"), ("key2", "")] +- ) ++ def test_quorate_with_qdevice_lost(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:28 2015 ++Quorum provider: corosync_votequorum ++Nodes: 3 ++Node ID: 1 ++Ring ID: 19860 ++Quorate: Yes + +- u = dom_get_child_elements(el)[0] +- self.assertEqual(len(dom_get_child_elements(u)), 1) +- self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("id"), +- "test_id-meta_attributes-key" +- ) ++Votequorum information ++---------------------- ++Expected votes: 10 ++Highest expected: 10 ++Total votes: 6 ++Quorum: 6 ++Flags: Quorate Qdevice ++ ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 3 NA,V,MNW rh70-node1 ++ 2 2 NA,V,MNW rh70-node2 (local) ++ 3 1 NA,V,MNW rh70-node3 ++ 0 0 Qdevice (votes 4) ++""") ++ self.assertEqual(True, parsed["quorate"]) ++ self.assertEqual(6, parsed["quorum"]) + self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("name"), +- "key" ++ [ ++ {"name": "rh70-node1", "votes": 3, "local": False}, ++ {"name": "rh70-node2", "votes": 2, "local": True}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ parsed["node_list"] + ) + self.assertEqual( +- dom_get_child_elements(u)[0].getAttribute("value"), +- "another_val" ++ [ ++ {"name": "Qdevice", "votes": 0, "local": False}, ++ ], ++ parsed["qdevice_list"] + ) + +- def test_get_utilization(self): +- el = xml.dom.minidom.parseString(""" +- <resource id="test_id"> +- <utilization id="test_id-utilization"> +- <nvpair id="test_id-utilization-key" name="key" value="-1"/> +- <nvpair id="test_id-utilization-keys" name="keys" value="90"/> +- 
</utilization> +- </resource> +- """).documentElement +- self.assertEqual({"key": "-1", "keys": "90"}, utils.get_utilization(el)) +- +- def test_get_utilization_str(self): +- el = xml.dom.minidom.parseString(""" +- <resource id="test_id"> +- <utilization id="test_id-utilization"> +- <nvpair id="test_id-utilization-key" name="key" value="-1"/> +- <nvpair id="test_id-utilization-keys" name="keys" value="90"/> +- </utilization> +- </resource> +- """).documentElement +- self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el)) +- +- def test_get_cluster_property_from_xml_enum(self): +- el = ET.fromstring(""" +- <parameter name="no-quorum-policy" unique="0"> +- <shortdesc lang="en">What to do when the cluster does not have quorum</shortdesc> +- <content type="enum" default="stop"/> +- <longdesc lang="en">What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide</longdesc> +- </parameter> +- """) +- expected = { +- "name": "no-quorum-policy", +- "shortdesc": "What to do when the cluster does not have quorum", +- "longdesc": "", +- "type": "enum", +- "default": "stop", +- "enum": ["stop", "freeze", "ignore", "suicide"] +- } +- self.assertEqual(expected, utils.get_cluster_property_from_xml(el)) +- +- def test_get_cluster_property_from_xml(self): +- el = ET.fromstring(""" +- <parameter name="default-resource-stickiness" unique="0"> +- <shortdesc lang="en"></shortdesc> +- <content type="integer" default="0"/> +- <longdesc lang="en"></longdesc> +- </parameter> +- """) +- expected = { +- "name": "default-resource-stickiness", +- "shortdesc": "", +- "longdesc": "", +- "type": "integer", +- "default": "0" +- } +- self.assertEqual(expected, utils.get_cluster_property_from_xml(el)) +- +- def test_get_cluster_property_default(self): +- definition = { +- "default-resource-stickiness": { +- "name": "default-resource-stickiness", +- "shortdesc": "", +- "longdesc": "", +- "type": "integer", +- "default": "0", +- "source": "pengine" +- }, +- 
"no-quorum-policy": { +- "name": "no-quorum-policy", +- "shortdesc": "What to do when the cluster does not have quorum", +- "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide", +- "type": "enum", +- "default": "stop", +- "enum": ["stop", "freeze", "ignore", "suicide"], +- "source": "pengine" +- }, +- "enable-acl": { +- "name": "enable-acl", +- "shortdesc": "Enable CIB ACL", +- "longdesc": "Enable CIB ACL", +- "type": "boolean", +- "default": "false", +- "source": "cib" +- } +- } ++ def test_no_quorate_no_qdevice(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:35 2015 ++Quorum provider: corosync_votequorum ++Nodes: 1 ++Node ID: 1 ++Ring ID: 19868 ++Quorate: No ++ ++Votequorum information ++---------------------- ++Expected votes: 3 ++Highest expected: 3 ++Total votes: 1 ++Quorum: 2 Activity blocked ++Flags: ++ ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 1 NR rh70-node1 (local) ++""") ++ self.assertEqual(False, parsed["quorate"]) ++ self.assertEqual(2, parsed["quorum"]) + self.assertEqual( +- utils.get_cluster_property_default( +- definition, "default-resource-stickiness" +- ), +- "0" ++ [ ++ {"name": "rh70-node1", "votes": 1, "local": True}, ++ ], ++ parsed["node_list"] + ) ++ self.assertEqual([], parsed["qdevice_list"]) ++ ++ def test_no_quorate_with_qdevice(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:35 2015 ++Quorum provider: corosync_votequorum ++Nodes: 1 ++Node ID: 1 ++Ring ID: 19868 ++Quorate: No ++ ++Votequorum information ++---------------------- ++Expected votes: 3 ++Highest expected: 3 ++Total votes: 1 ++Quorum: 2 Activity blocked ++Flags: Qdevice ++ ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 1 NR rh70-node1 (local) ++ 0 0 Qdevice (votes 1) ++""") ++ self.assertEqual(False, 
parsed["quorate"]) ++ self.assertEqual(2, parsed["quorum"]) + self.assertEqual( +- utils.get_cluster_property_default(definition, "no-quorum-policy"), +- "stop" ++ [ ++ {"name": "rh70-node1", "votes": 1, "local": True}, ++ ], ++ parsed["node_list"] + ) + self.assertEqual( +- utils.get_cluster_property_default(definition, "enable-acl"), +- "false" +- ) +- self.assertRaises( +- utils.UnknownPropertyException, +- utils.get_cluster_property_default, definition, "non-existing" ++ [ ++ {"name": "Qdevice", "votes": 0, "local": False}, ++ ], ++ parsed["qdevice_list"] + ) + +- def test_is_valid_cib_value_unknown_type(self): +- # should be always true +- self.assertTrue(utils.is_valid_cib_value("unknown", "test")) +- self.assertTrue(utils.is_valid_cib_value("string", "string value")) ++ def test_error_missing_quorum(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:28 2015 ++Quorum provider: corosync_votequorum ++Nodes: 3 ++Node ID: 1 ++Ring ID: 19860 ++Quorate: Yes + +- def test_is_valid_cib_value_integer(self): +- self.assertTrue(utils.is_valid_cib_value("integer", "0")) +- self.assertTrue(utils.is_valid_cib_value("integer", "42")) +- self.assertTrue(utils.is_valid_cib_value("integer", "-90")) +- self.assertTrue(utils.is_valid_cib_value("integer", "+90")) +- self.assertTrue(utils.is_valid_cib_value("integer", "INFINITY")) +- self.assertTrue(utils.is_valid_cib_value("integer", "-INFINITY")) +- self.assertTrue(utils.is_valid_cib_value("integer", "+INFINITY")) +- self.assertFalse(utils.is_valid_cib_value("integer", "0.0")) +- self.assertFalse(utils.is_valid_cib_value("integer", "-10.9")) +- self.assertFalse(utils.is_valid_cib_value("integer", "string")) ++Votequorum information ++---------------------- ++Expected votes: 3 ++Highest expected: 3 ++Total votes: 3 ++Quorum: ++Flags: Quorate + +- def test_is_valid_cib_value_enum(self): +- self.assertTrue( +- utils.is_valid_cib_value("enum", "this", ["another", 
"this", "1"]) +- ) +- self.assertFalse( +- utils.is_valid_cib_value("enum", "this", ["another", "this_not"]) +- ) +- self.assertFalse(utils.is_valid_cib_value("enum", "this", [])) +- self.assertFalse(utils.is_valid_cib_value("enum", "this")) ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 1 NR rh70-node1 (local) ++ 2 1 NR rh70-node2 ++ 3 1 NR rh70-node3 ++""") ++ self.assertEqual(None, parsed) + +- def test_is_valid_cib_value_boolean(self): +- self.assertTrue(utils.is_valid_cib_value("boolean", "true")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "TrUe")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "TRUE")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "yes")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "on")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "y")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "Y")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "1")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "false")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "FaLse")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "FALSE")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "off")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "no")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "N")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "n")) +- self.assertTrue(utils.is_valid_cib_value("boolean", "0")) +- self.assertFalse(utils.is_valid_cib_value("boolean", "-1")) +- self.assertFalse(utils.is_valid_cib_value("boolean", "not")) +- self.assertFalse(utils.is_valid_cib_value("boolean", "random_string")) +- self.assertFalse(utils.is_valid_cib_value("boolean", "truth")) ++ def test_error_quorum_garbage(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:28 2015 ++Quorum provider: corosync_votequorum ++Nodes: 3 ++Node ID: 1 ++Ring ID: 19860 
++Quorate: Yes + +- def test_is_valid_cib_value_time(self): +- self.assertTrue(utils.is_valid_cib_value("time", "10")) +- self.assertTrue(utils.is_valid_cib_value("time", "0")) +- self.assertTrue(utils.is_valid_cib_value("time", "9s")) +- self.assertTrue(utils.is_valid_cib_value("time", "10sec")) +- self.assertTrue(utils.is_valid_cib_value("time", "10min")) +- self.assertTrue(utils.is_valid_cib_value("time", "10m")) +- self.assertTrue(utils.is_valid_cib_value("time", "10h")) +- self.assertTrue(utils.is_valid_cib_value("time", "10hr")) +- self.assertFalse(utils.is_valid_cib_value("time", "5.2")) +- self.assertFalse(utils.is_valid_cib_value("time", "-10")) +- self.assertFalse(utils.is_valid_cib_value("time", "10m 2s")) +- self.assertFalse(utils.is_valid_cib_value("time", "hour")) +- self.assertFalse(utils.is_valid_cib_value("time", "day")) ++Votequorum information ++---------------------- ++Expected votes: 3 ++Highest expected: 3 ++Total votes: 3 ++Quorum: Foo ++Flags: Quorate + +- def test_validate_cluster_property(self): +- definition = { +- "default-resource-stickiness": { +- "name": "default-resource-stickiness", +- "shortdesc": "", +- "longdesc": "", +- "type": "integer", +- "default": "0", +- "source": "pengine" +- }, +- "no-quorum-policy": { +- "name": "no-quorum-policy", +- "shortdesc": "What to do when the cluster does not have quorum", +- "longdesc": "What to do when the cluster does not have quorum Allowed values: stop, freeze, ignore, suicide", +- "type": "enum", +- "default": "stop", +- "enum": ["stop", "freeze", "ignore", "suicide"], +- "source": "pengine" +- }, +- "enable-acl": { +- "name": "enable-acl", +- "shortdesc": "Enable CIB ACL", +- "longdesc": "Enable CIB ACL", +- "type": "boolean", +- "default": "false", +- "source": "cib" +- } +- } +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "default-resource-stickiness", "10" +- )) +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "default-resource-stickiness", "-1" 
+- )) +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "no-quorum-policy", "freeze" +- )) +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "no-quorum-policy", "suicide" +- )) +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "enable-acl", "true" +- )) +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "enable-acl", "false" +- )) +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "enable-acl", "on" +- )) +- self.assertTrue(utils.is_valid_cluster_property( +- definition, "enable-acl", "OFF" +- )) +- self.assertFalse(utils.is_valid_cluster_property( +- definition, "default-resource-stickiness", "test" +- )) +- self.assertFalse(utils.is_valid_cluster_property( +- definition, "default-resource-stickiness", "1.2" +- )) +- self.assertFalse(utils.is_valid_cluster_property( +- definition, "no-quorum-policy", "invalid" +- )) +- self.assertFalse(utils.is_valid_cluster_property( +- definition, "enable-acl", "not" +- )) +- self.assertRaises( +- utils.UnknownPropertyException, +- utils.is_valid_cluster_property, definition, "unknown", "value" +- ) ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 1 NR rh70-node1 (local) ++ 2 1 NR rh70-node2 ++ 3 1 NR rh70-node3 ++""") ++ self.assertEqual(None, parsed) ++ ++ def test_error_node_votes_garbage(self): ++ parsed = utils.parse_quorumtool_output("""\ ++Quorum information ++------------------ ++Date: Fri Jan 16 13:03:28 2015 ++Quorum provider: corosync_votequorum ++Nodes: 3 ++Node ID: 1 ++Ring ID: 19860 ++Quorate: Yes ++ ++Votequorum information ++---------------------- ++Expected votes: 3 ++Highest expected: 3 ++Total votes: 3 ++Quorum: 2 ++Flags: Quorate + +- def assert_element_id(self, node, node_id): +- self.assertTrue( +- isinstance(node, xml.dom.minidom.Element), +- "element with id '%s' not found" % node_id ++Membership information ++---------------------- ++ Nodeid Votes Qdevice Name ++ 1 1 NR rh70-node1 (local) 
++ 2 foo NR rh70-node2 ++ 3 1 NR rh70-node3 ++""") ++ self.assertEqual(None, parsed) ++ ++ ++class IsNodeStopCauseQuorumLossTest(unittest.TestCase): ++ def test_not_quorate(self): ++ quorum_info = { ++ "quorate": False, ++ } ++ self.assertEqual( ++ False, ++ utils.is_node_stop_cause_quorum_loss(quorum_info, True) + ) +- self.assertEqual(node.getAttribute("id"), node_id) + +-class RunParallelTest(unittest.TestCase): +- def fixture_create_worker(self, log, name, sleepSeconds=0): +- def worker(): +- sleep(sleepSeconds) +- log.append(name) +- return worker ++ def test_local_node_not_in_list(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 1, ++ "node_list": [ ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [], ++ } ++ self.assertEqual( ++ False, ++ utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ ) + +- def test_run_all_workers(self): +- log = [] +- utils.run_parallel( +- [ +- self.fixture_create_worker(log, 'first'), +- self.fixture_create_worker(log, 'second'), ++ def test_local_node_alone_in_list(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 1, ++ "node_list": [ ++ {"name": "rh70-node3", "votes": 1, "local": True}, + ], +- wait_seconds=.1 ++ "qdevice_list": [], ++ } ++ self.assertEqual( ++ True, ++ utils.is_node_stop_cause_quorum_loss(quorum_info, True) + ) + +- self.assertEqual(log, ['first', 'second']) ++ def test_local_node_still_quorate(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 3, "local": False}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": True}, ++ ], ++ "qdevice_list": [], ++ } ++ self.assertEqual( ++ False, ++ utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ ) + +- def test_wait_for_slower_workers(self): +- log = [] +- utils.run_parallel( +- [ +- self.fixture_create_worker(log, 'first', .03), +- self.fixture_create_worker(log, 'second'), ++ quorum_info = { 
++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 3, "local": False}, ++ {"name": "rh70-node2", "votes": 2, "local": True}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, + ], +- wait_seconds=.01 ++ "qdevice_list": [], ++ } ++ self.assertEqual( ++ False, ++ utils.is_node_stop_cause_quorum_loss(quorum_info, True) + ) + +- self.assertEqual(log, ['second', 'first']) ++ def test_local_node_quorum_loss(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 3, "local": True}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [], ++ } ++ self.assertEqual( ++ True, ++ utils.is_node_stop_cause_quorum_loss(quorum_info, True) ++ ) + +-class PrepareNodeNamesTest(unittest.TestCase): +- def test_return_original_when_is_in_pacemaker_nodes(self): +- node = 'test' ++ def test_one_node_still_quorate(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 3, "local": True}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [], ++ } + self.assertEqual( +- node, +- utils.prepare_node_name(node, {1: node}, {}) ++ False, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node3"] ++ ) + ) + +- def test_return_original_when_is_not_in_corosync_nodes(self): +- node = 'test' ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 3, "local": True}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [], ++ } + self.assertEqual( +- node, +- utils.prepare_node_name(node, {}, {}) ++ False, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node2"] ++ ) + ) + +- def 
test_return_original_when_corosync_id_not_in_pacemaker(self): +- node = 'test' ++ def test_one_node_quorum_loss(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 3, "local": True}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [], ++ } + self.assertEqual( +- node, +- utils.prepare_node_name(node, {}, {1: node}) ++ True, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node1"] ++ ) + ) + +- def test_return_modified_name(self): +- node = 'test' ++ def test_more_nodes_still_quorate(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 4, "local": True}, ++ {"name": "rh70-node2", "votes": 1, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [], ++ } + self.assertEqual( +- 'another (test)', +- utils.prepare_node_name(node, {1: 'another'}, {1: node}) ++ False, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node2", "rh70-node3"] ++ ) + ) + +- def test_return_modified_name_with_pm_null_case(self): +- node = 'test' ++ def test_more_nodes_quorum_loss(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 3, "local": True}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [], ++ } + self.assertEqual( +- '*Unknown* (test)', +- utils.prepare_node_name(node, {1: '(null)'}, {1: node}) ++ True, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node2", "rh70-node3"] ++ ) + ) + +-class NodeActionTaskTest(unittest.TestCase): +- def test_can_run_action(self): +- def action(node, arg, kwarg=None): +- return (0, ':'.join([node, arg, kwarg])) ++ def test_qdevice_still_quorate(self): ++ quorum_info = { ++ "quorate": True, 
++ "quorum": 3, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 1, "local": True}, ++ {"name": "rh70-node2", "votes": 1, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [ ++ {"name": "Qdevice", "votes": 1, "local": False}, ++ ], ++ } ++ self.assertEqual( ++ False, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node2"] ++ ) ++ ) + +- report_list = [] +- def report(node, returncode, output): +- report_list.append('|'.join([node, str(returncode), output])) ++ def test_qdevice_quorum_lost(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 3, ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 1, "local": True}, ++ {"name": "rh70-node2", "votes": 1, "local": False}, ++ {"name": "rh70-node3", "votes": 1, "local": False}, ++ ], ++ "qdevice_list": [ ++ {"name": "Qdevice", "votes": 1, "local": False}, ++ ], ++ } ++ self.assertEqual( ++ True, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node2", "rh70-node3"] ++ ) ++ ) + +- task = utils.create_task(report, action, 'node', 'arg', kwarg='kwarg') +- task() ++ def test_qdevice_lost_still_quorate(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, # expect qdevice votes == 1 ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 2, "local": True}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 2, "local": False}, ++ ], ++ "qdevice_list": [ ++ {"name": "Qdevice", "votes": 0, "local": False}, ++ ], ++ } ++ self.assertEqual( ++ False, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node2"] ++ ) ++ ) + +- self.assertEqual(['node|0|node:arg:kwarg'], report_list) ++ def test_qdevice_lost_quorum_lost(self): ++ quorum_info = { ++ "quorate": True, ++ "quorum": 4, # expect qdevice votes == 1 ++ "node_list": [ ++ {"name": "rh70-node1", "votes": 2, "local": True}, ++ {"name": "rh70-node2", "votes": 2, "local": False}, ++ {"name": "rh70-node3", "votes": 2, 
"local": False}, ++ ], ++ "qdevice_list": [ ++ {"name": "Qdevice", "votes": 0, "local": False}, ++ ], ++ } ++ self.assertEqual( ++ True, ++ utils.is_node_stop_cause_quorum_loss( ++ quorum_info, False, ["rh70-node2", "rh70-node3"] ++ ) ++ ) +diff --git a/pcs/usage.py b/pcs/usage.py +index 8ae6839..42e03e6 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1272,14 +1272,20 @@ Commands: + def qdevice(args=[], pout=True): + output = """ + Usage: pcs qdevice <command> +-Manage quorum device provider on the local host ++Manage quorum device provider on the local host, currently only 'net' model is ++supported. + + Commands: ++ status <device model> [--full] [<cluster name>] ++ Show runtime status of specified model of quorum device provider. Using ++ --full will give more detailed output. If <cluster name> is specified, ++ only information about the specified cluster will be displayed. ++ + setup model <device model> [--enable] [--start] + Configure specified model of quorum device provider. Quorum device then +- may be added to clusters by "pcs quorum device add" command. +- --start will also start the provider. --enable will configure +- the provider to start on boot. ++ can be added to clusters by running "pcs quorum device add" command ++ in a cluster. --start will also start the provider. --enable will ++ configure the provider to start on boot. + + destroy <device model> + Disable and stop specified model of quorum device provider and delete +@@ -1292,8 +1298,10 @@ Commands: + Stop specified model of quorum device provider. + + kill <device model> +- Force specified model of quorum device provider to stop (performs +- kill -9). ++ Force specified model of quorum device provider to stop (performs kill ++ -9). Note that init system (e.g. systemd) can detect that the qdevice ++ is not running and start it again. If you want to stop the qdevice, run ++ "pcs qdevice stop" command. 
+ + enable <device model> + Configure specified model of quorum device provider to start on boot. +@@ -1310,21 +1318,38 @@ Commands: + def quorum(args=[], pout=True): + output = """ + Usage: pcs quorum <command> +-Manage cluster quorum settings ++Manage cluster quorum settings. + + Commands: + config + Show quorum configuration. + +- device add [generic options] model <device model> [model options] +- Add quorum device to cluster. Quorum device needs to be created first +- by "pcs qdevice setup" command. ++ status ++ Show quorum runtime status. ++ ++ device add [<generic options>] model <device model> [<model options>] ++ Add a quorum device to the cluster. Quorum device needs to be created ++ first by "pcs qdevice setup" command. It is not possible to use more ++ than one quorum device in a cluster simultaneously. Generic options, ++ model and model options are all documented in corosync's ++ corosync-qdevice(8) man page. + + device remove +- Remove quorum device from cluster. ++ Remove a quorum device from the cluster. ++ ++ device status [--full] ++ Show quorum device runtime status. Using --full will give more detailed ++ output. ++ ++ device update [<generic options>] [model <model options>] ++ Add/Change quorum device options. Generic options and model options are ++ all documented in corosync's corosync-qdevice(8) man page. Requires ++ the cluster to be stopped. + +- device update [generic options] [model <model options>] +- Add/Change quorum device options. Requires cluster to be stopped. ++ WARNING: If you want to change "host" option of qdevice model net, use ++ "pcs quorum device remove" and "pcs quorum device add" commands ++ to set up configuration properly unless old and new host is the same ++ machine. + + unblock [--force] + Cancel waiting for all nodes when establishing quorum. Useful in +@@ -1343,7 +1368,7 @@ Commands: + [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]] + Add/Change quorum options. 
At least one option must be specified. + Options are documented in corosync's votequorum(5) man page. Requires +- cluster to be stopped. ++ the cluster to be stopped. + """ + if pout: + print(sub_usage(args, output)) +diff --git a/pcs/utils.py b/pcs/utils.py +index f9cdb1c..171fbdd 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -56,7 +56,6 @@ except ImportError: + + + from pcs import settings, usage +-from pcs.common import report_codes + from pcs.cli.common.reports import ( + process_library_reports, + LibraryReportProcessorToConsole as LibraryReportProcessorToConsole, +@@ -64,18 +63,21 @@ from pcs.cli.common.reports import ( + from pcs.common.tools import simple_cache + from pcs.lib import reports + from pcs.lib.env import LibraryEnvironment +-from pcs.lib.errors import LibraryError, ReportItemSeverity +-import pcs.lib.corosync.config_parser as corosync_conf_parser ++from pcs.lib.errors import LibraryError + from pcs.lib.external import ( +- is_cman_cluster, + CommandRunner, +- is_service_running, +- is_service_enabled, ++ is_cman_cluster, + is_systemctl, ++ is_service_enabled, ++ is_service_running, ++ disable_service, ++ DisableServiceError, ++ enable_service, ++ EnableServiceError, + ) + import pcs.lib.resource_agent as lib_ra ++import pcs.lib.corosync.config_parser as corosync_conf_parser + from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade +-from pcs.lib.nodes_task import check_corosync_offline_on_nodes + from pcs.lib.pacemaker import has_resource_wait_support + from pcs.lib.pacemaker_state import ClusterState + from pcs.lib.pacemaker_values import( +@@ -686,50 +688,18 @@ def autoset_2node_corosync(corosync_conf): + facade._ConfigFacade__update_two_node() + return facade.config + +-# when adding or removing a node, changing number of nodes to or from two, +-# we need to change qdevice algorith lms <-> 2nodelms, which cannot be done when +-# the cluster is running +-def 
check_qdevice_algorithm_and_running_cluster(corosync_conf, add=True): ++# is it needed to handle corosync-qdevice service when managing cluster services ++def need_to_handle_qdevice_service(): + if is_rhel6(): +- return +- facade = corosync_conf_facade.from_string(corosync_conf) +- if not facade.has_quorum_device(): +- return +- node_list = facade.get_nodes() +- node_count_target = len(node_list) + (1 if add else -1) +- model, model_opts, dummy_generic_opts = facade.get_quorum_device_settings() +- if model != "net": +- return +- algorithm = model_opts.get("algorithm", "") +- need_stopped = ( +- (algorithm == "lms" and node_count_target == 2) +- or +- (algorithm == "2nodelms" and node_count_target != 2) +- ) +- if not need_stopped: +- return +- ++ return False + try: +- lib_env = get_lib_env() +- check_corosync_offline_on_nodes( +- lib_env.node_communicator(), +- lib_env.report_processor, +- node_list, +- get_modificators()["skip_offline_nodes"] ++ cfg = corosync_conf_facade.from_string( ++ open(settings.corosync_conf_file).read() + ) +- except LibraryError as e: +- report_item_list = list(e.args) +- for report_item in report_item_list: +- if ( +- report_item.code == report_codes.COROSYNC_RUNNING_ON_NODE +- and +- report_item.severity == ReportItemSeverity.ERROR +- ): +- report_item_list.append( +- reports.qdevice_remove_or_cluster_stop_needed() +- ) +- break +- process_library_reports(report_item_list) ++ return cfg.has_quorum_device() ++ except (EnvironmentError, corosync_conf_parser.CorosyncConfParserException): ++ # corosync.conf not present or not valid => no qdevice specified ++ return False + + def getNextNodeID(corosync_conf): + currentNodes = [] +@@ -2070,28 +2040,43 @@ def serviceStatus(prefix): + pass + + def enableServices(): ++ # do NOT handle SBD in here, it is started by pacemaker not systemd or init + if is_rhel6(): +- run(["chkconfig", "pacemaker", "on"]) ++ service_list = ["pacemaker"] + else: +- if is_systemctl(): +- run(["systemctl", "enable", 
"corosync.service"]) +- run(["systemctl", "enable", "pacemaker.service"]) +- else: +- run(["chkconfig", "corosync", "on"]) +- run(["chkconfig", "pacemaker", "on"]) ++ service_list = ["corosync", "pacemaker"] ++ if need_to_handle_qdevice_service(): ++ service_list.append("corosync-qdevice") ++ ++ report_item_list = [] ++ for service in service_list: ++ try: ++ enable_service(cmd_runner(), service) ++ except EnableServiceError as e: ++ report_item_list.append( ++ reports.service_enable_error(e.service, e.message) ++ ) ++ if report_item_list: ++ raise LibraryError(*report_item_list) + + def disableServices(): +- if is_rhel6(): +- run(["chkconfig", "pacemaker", "off"]) +- run(["chkconfig", "corosync", "off"]) # Left here for users of old pcs +- # which enabled corosync +- else: +- if is_systemctl(): +- run(["systemctl", "disable", "corosync.service"]) +- run(["systemctl", "disable", "pacemaker.service"]) +- else: +- run(["chkconfig", "corosync", "off"]) +- run(["chkconfig", "pacemaker", "off"]) ++ # Disable corosync on RHEL6 as well - left here for users of old pcs which ++ # enabled corosync. 
++ # do NOT handle SBD in here, it is started by pacemaker not systemd or init ++ service_list = ["corosync", "pacemaker"] ++ if need_to_handle_qdevice_service(): ++ service_list.append("corosync-qdevice") ++ ++ report_item_list = [] ++ for service in service_list: ++ try: ++ disable_service(cmd_runner(), service) ++ except DisableServiceError as e: ++ report_item_list.append( ++ reports.service_disable_error(e.service, e.message) ++ ) ++ if report_item_list: ++ raise LibraryError(*report_item_list) + + def write_file(path, data, permissions=0o644, binary=False): + if os.path.exists(path): +@@ -2248,7 +2233,7 @@ def parse_cman_quorum_info(cman_info): + in_node_list = False + local_node_id = "" + try: +- for line in cman_info.split("\n"): ++ for line in cman_info.splitlines(): + line = line.strip() + if not line: + continue +@@ -2260,12 +2245,13 @@ def parse_cman_quorum_info(cman_info): + parsed["node_list"].append({ + "name": parts[3], + "votes": int(parts[2]), +- "local": local_node_id == parts[0] ++ "local": local_node_id == parts[0], + }) + else: + if line == "---Votes---": + in_node_list = True + parsed["node_list"] = [] ++ parsed["qdevice_list"] = [] + continue + if not ":" in line: + continue +@@ -2290,7 +2276,7 @@ def parse_quorumtool_output(quorumtool_output): + parsed = {} + in_node_list = False + try: +- for line in quorumtool_output.split("\n"): ++ for line in quorumtool_output.splitlines(): + line = line.strip() + if not line: + continue +@@ -2299,15 +2285,25 @@ def parse_quorumtool_output(quorumtool_output): + # skip headers + continue + parts = line.split() +- parsed["node_list"].append({ +- "name": parts[3], +- "votes": int(parts[1]), +- "local": len(parts) > 4 and parts[4] == "(local)" +- }) ++ if parts[0] == "0": ++ # this line has nodeid == 0, this is a qdevice line ++ parsed["qdevice_list"].append({ ++ "name": parts[2], ++ "votes": int(parts[1]), ++ "local": False, ++ }) ++ else: ++ # this line has non-zero nodeid, this is a node line ++ 
parsed["node_list"].append({ ++ "name": parts[3], ++ "votes": int(parts[1]), ++ "local": len(parts) > 4 and parts[4] == "(local)", ++ }) + else: + if line == "Membership information": + in_node_list = True + parsed["node_list"] = [] ++ parsed["qdevice_list"] = [] + continue + if not ":" in line: + continue +@@ -2340,6 +2336,8 @@ def is_node_stop_cause_quorum_loss(quorum_info, local=True, node_list=None): + if node_list and node_info["name"] in node_list: + continue + votes_after_stop += node_info["votes"] ++ for qdevice_info in quorum_info.get("qdevice_list", []): ++ votes_after_stop += qdevice_info["votes"] + return votes_after_stop < quorum_info["quorum"] + + def dom_prepare_child_element(dom_element, tag_name, id): +@@ -2661,6 +2659,7 @@ def get_modificators(): + "enable": "--enable" in pcs_options, + "force": "--force" in pcs_options, + "full": "--full" in pcs_options, ++ "name": pcs_options.get("--name", None), + "skip_offline_nodes": "--skip-offline" in pcs_options, + "start": "--start" in pcs_options, + "watchdog": pcs_options.get("--watchdog", []), +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 415e02a..7c25e10 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1965,6 +1965,23 @@ def disable_service(service) + return (retcode == 0) + end + ++def start_service(service) ++ _, _, retcode = run_cmd( ++ PCSAuth.getSuperuserAuth(), "service", service, "start" ++ ) ++ return (retcode == 0) ++end ++ ++def stop_service(service) ++ if not is_service_installed?(service) ++ return true ++ end ++ _, _, retcode = run_cmd( ++ PCSAuth.getSuperuserAuth(), "service", service, "stop" ++ ) ++ return (retcode == 0) ++end ++ + def set_cluster_prop_force(auth_user, prop, val) + cmd = [PCS, 'property', 'set', "#{prop}=#{val}", '--force'] + if pacemaker_running? 
+diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index f002d5b..0b2dc61 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -4,6 +4,7 @@ require 'open4' + require 'set' + require 'timeout' + require 'rexml/document' ++require 'base64' + + require 'pcs.rb' + require 'resource.rb' +@@ -71,7 +72,16 @@ def remote(params, request, auth_user) + :remove_stonith_watchdog_timeout=> method(:remove_stonith_watchdog_timeout), + :set_stonith_watchdog_timeout_to_zero => method(:set_stonith_watchdog_timeout_to_zero), + :remote_enable_sbd => method(:remote_enable_sbd), +- :remote_disable_sbd => method(:remote_disable_sbd) ++ :remote_disable_sbd => method(:remote_disable_sbd), ++ :qdevice_net_get_ca_certificate => method(:qdevice_net_get_ca_certificate), ++ :qdevice_net_sign_node_certificate => method(:qdevice_net_sign_node_certificate), ++ :qdevice_net_client_init_certificate_storage => method(:qdevice_net_client_init_certificate_storage), ++ :qdevice_net_client_import_certificate => method(:qdevice_net_client_import_certificate), ++ :qdevice_net_client_destroy => method(:qdevice_net_client_destroy), ++ :qdevice_client_enable => method(:qdevice_client_enable), ++ :qdevice_client_disable => method(:qdevice_client_disable), ++ :qdevice_client_start => method(:qdevice_client_start), ++ :qdevice_client_stop => method(:qdevice_client_stop), + } + remote_cmd_with_pacemaker = { + :pacemaker_node_status => method(:remote_pacemaker_node_status), +@@ -2377,3 +2387,154 @@ def remote_disable_sbd(params, request, auth_user) + + return [200, 'Sbd has been disabled.'] + end ++ ++def qdevice_net_get_ca_certificate(params, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::READ) ++ return 403, 'Permission denied' ++ end ++ begin ++ return [ ++ 200, ++ Base64.encode64(File.read(COROSYNC_QDEVICE_NET_SERVER_CA_FILE)) ++ ] ++ rescue => e ++ return [400, "Unable to read certificate: #{e}"] ++ end ++end ++ ++def qdevice_net_sign_node_certificate(params, request, auth_user) 
++ unless allowed_for_local_cluster(auth_user, Permissions::READ) ++ return 403, 'Permission denied' ++ end ++ stdout, stderr, retval = run_cmd_options( ++ auth_user, ++ {'stdin' => params[:certificate_request]}, ++ PCS, 'qdevice', 'sign-net-cert-request', '--name', params[:cluster_name] ++ ) ++ if retval != 0 ++ return [400, stderr.join('')] ++ end ++ return [200, stdout.join('')] ++end ++ ++def qdevice_net_client_init_certificate_storage(params, request, auth_user) ++ # Last step of adding qdevice into a cluster is distribution of corosync.conf ++ # file with qdevice settings. This requires FULL permissions currently. ++ # If that gets relaxed, we can require lower permissions in here as well. ++ unless allowed_for_local_cluster(auth_user, Permissions::FULL) ++ return 403, 'Permission denied' ++ end ++ stdout, stderr, retval = run_cmd_options( ++ auth_user, ++ {'stdin' => params[:ca_certificate]}, ++ PCS, 'qdevice', 'net-client', 'setup' ++ ) ++ if retval != 0 ++ return [400, stderr.join('')] ++ end ++ return [200, stdout.join('')] ++end ++ ++def qdevice_net_client_import_certificate(params, request, auth_user) ++ # Last step of adding qdevice into a cluster is distribution of corosync.conf ++ # file with qdevice settings. This requires FULL permissions currently. ++ # If that gets relaxed, we can require lower permissions in here as well. ++ unless allowed_for_local_cluster(auth_user, Permissions::FULL) ++ return 403, 'Permission denied' ++ end ++ stdout, stderr, retval = run_cmd_options( ++ auth_user, ++ {'stdin' => params[:certificate]}, ++ PCS, 'qdevice', 'net-client', 'import-certificate' ++ ) ++ if retval != 0 ++ return [400, stderr.join('')] ++ end ++ return [200, stdout.join('')] ++end ++ ++def qdevice_net_client_destroy(param, request, auth_user) ++ # When removing a qdevice from a cluster, an updated corosync.conf file ++ # with removed qdevice settings is distributed. This requires FULL permissions ++ # currently. 
If that gets relaxed, we can require lower permissions in here ++ # as well. ++ unless allowed_for_local_cluster(auth_user, Permissions::FULL) ++ return 403, 'Permission denied' ++ end ++ stdout, stderr, retval = run_cmd( ++ auth_user, ++ PCS, 'qdevice', 'net-client', 'destroy' ++ ) ++ if retval != 0 ++ return [400, stderr.join('')] ++ end ++ return [200, stdout.join('')] ++end ++ ++def qdevice_client_disable(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ if disable_service('corosync-qdevice') ++ msg = 'corosync-qdevice disabled' ++ $logger.info(msg) ++ return [200, msg] ++ else ++ msg = 'Disabling corosync-qdevice failed' ++ $logger.error(msg) ++ return [400, msg] ++ end ++end ++ ++def qdevice_client_enable(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ if not is_service_enabled?('corosync') ++ msg = 'corosync is not enabled, skipping' ++ $logger.info(msg) ++ return [200, msg] ++ elsif enable_service('corosync-qdevice') ++ msg = 'corosync-qdevice enabled' ++ $logger.info(msg) ++ return [200, msg] ++ else ++ msg = 'Enabling corosync-qdevice failed' ++ $logger.error(msg) ++ return [400, msg] ++ end ++end ++ ++def qdevice_client_stop(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ if stop_service('corosync-qdevice') ++ msg = 'corosync-qdevice stopped' ++ $logger.info(msg) ++ return [200, msg] ++ else ++ msg = 'Stopping corosync-qdevice failed' ++ $logger.error(msg) ++ return [400, msg] ++ end ++end ++ ++def qdevice_client_start(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ if not is_service_running?('corosync') ++ msg = 'corosync is not running, skipping' ++ $logger.info(msg) ++ return [200, msg] ++ elsif 
start_service('corosync-qdevice') ++ msg = 'corosync-qdevice started' ++ $logger.info(msg) ++ return [200, msg] ++ else ++ msg = 'Starting corosync-qdevice failed' ++ $logger.error(msg) ++ return [400, msg] ++ end ++end +diff --git a/pcsd/settings.rb b/pcsd/settings.rb +index 6229161..51f00ac 100644 +--- a/pcsd/settings.rb ++++ b/pcsd/settings.rb +@@ -21,6 +21,12 @@ CIBADMIN = "/usr/sbin/cibadmin" + SBD_CONFIG = '/etc/sysconfig/sbd' + CIB_PATH='/var/lib/pacemaker/cib/cib.xml' + ++COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb" ++COROSYNC_QDEVICE_NET_SERVER_CA_FILE = ( ++ COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt" ++) ++COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb" ++ + SUPERUSER = 'hacluster' + ADMIN_GROUP = 'haclient' + $user_pass_file = "pcs_users.conf" +diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian +index 7bc92a9..aae1b11 100644 +--- a/pcsd/settings.rb.debian ++++ b/pcsd/settings.rb.debian +@@ -18,8 +18,14 @@ COROSYNC_BINARIES = "/usr/sbin/" + CMAN_TOOL = "/usr/sbin/cman_tool" + PACEMAKERD = "/usr/sbin/pacemakerd" + CIBADMIN = "/usr/sbin/cibadmin" +-SBD_CONFIG = '/etc/sysconfig/sbd' +-CIB_PATH='/var/lib/pacemaker/cib/cib.xml' ++SBD_CONFIG = "/etc/sysconfig/sbd" ++CIB_PATH = "/var/lib/pacemaker/cib/cib.xml" ++ ++COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb" ++COROSYNC_QDEVICE_NET_SERVER_CA_FILE = ( ++ COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR + "/qnetd-cacert.crt" ++) ++COROSYNC_QDEVICE_NET_CLIENT_CERTS_DIR = "/etc/corosync/qdevice/net/nssdb" + + SUPERUSER = 'hacluster' + ADMIN_GROUP = 'haclient' +-- +1.8.3.1 + diff --git a/SOURCES/bz1158805-01-cli-improve-quorum-device-commands-syntax.patch b/SOURCES/bz1158805-01-cli-improve-quorum-device-commands-syntax.patch new file mode 100644 index 0000000..9ad60bf --- /dev/null +++ b/SOURCES/bz1158805-01-cli-improve-quorum-device-commands-syntax.patch @@ -0,0 +1,298 @@ +From 32d9dde2936b9f8b690ce3dd6c9bdc685f3ac5f0 Mon Sep 
17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Mon, 11 Jul 2016 15:19:30 +0200 +Subject: [PATCH] cli: improve quorum device commands syntax + +* add alias "pcs status quorum" to "pcs quorum status" +* add alias "pcs status qdevice" to "pcs qdevice status" +* add alias "pcs quorum" to "pcs quorum config" +--- + pcs/cluster.py | 59 +++------------------------------------------------ + pcs/pcs.8 | 8 ++++++- + pcs/quorum.py | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++----- + pcs/status.py | 25 ++++++++++++++++++++++ + pcs/usage.py | 10 ++++++++- + 5 files changed, 106 insertions(+), 63 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 9d4798c..4155103 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -26,7 +26,7 @@ from pcs import ( + constraint, + node, + pcsd, +- prop, ++ quorum, + resource, + settings, + status, +@@ -143,9 +143,9 @@ def cluster_cmd(argv): + cluster_report(argv) + elif (sub_cmd == "quorum"): + if argv and argv[0] == "unblock": +- cluster_quorum_unblock(argv[1:]) ++ quorum.quorum_unblock_cmd(argv[1:]) + else: +- usage.cluster(["quorum"]) ++ usage.cluster() + sys.exit(1) + else: + usage.cluster() +@@ -1890,56 +1890,3 @@ def cluster_remote_node(argv): + usage.cluster(["remote-node"]) + sys.exit(1) + +-def cluster_quorum_unblock(argv): +- if len(argv) > 0: +- usage.quorum(["unblock"]) +- sys.exit(1) +- +- if utils.is_rhel6(): +- utils.err("operation is not supported on CMAN clusters") +- +- output, retval = utils.run( +- ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"] +- ) +- if retval != 0: +- utils.err("unable to check quorum status") +- if output.split("=")[-1].strip() != "1": +- utils.err("cluster is not waiting for nodes to establish quorum") +- +- unjoined_nodes = ( +- set(utils.getNodesFromCorosyncConf()) +- - +- set(utils.getCorosyncActiveNodes()) +- ) +- if not unjoined_nodes: +- utils.err("no unjoined nodes found") +- if "--force" not in utils.pcs_options: +- answer = 
utils.get_terminal_input( +- ( +- "WARNING: If node(s) {nodes} are not powered off or they do" +- + " have access to shared resources, data corruption and/or" +- + " cluster failure may occur. Are you sure you want to" +- + " continue? [y/N] " +- ).format(nodes=", ".join(unjoined_nodes)) +- ) +- if answer.lower() not in ["y", "yes"]: +- print("Canceled") +- return +- for node in unjoined_nodes: +- stonith.stonith_confirm([node], skip_question=True) +- +- output, retval = utils.run( +- ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"] +- ) +- if retval != 0: +- utils.err("unable to cancel waiting for nodes") +- print("Quorum unblocked") +- +- startup_fencing = prop.get_set_properties().get("startup-fencing", "") +- utils.set_cib_property( +- "startup-fencing", +- "false" if startup_fencing.lower() != "false" else "true" +- ) +- utils.set_cib_property("startup-fencing", startup_fencing) +- print("Waiting for nodes canceled") +- +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 223ef1b..a26c94b 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -543,7 +543,7 @@ disable <device model> + Configure specified model of quorum device provider to not start on boot. + .SS "quorum" + .TP +-config ++[config] + Show quorum configuration. + .TP + status +@@ -590,6 +590,12 @@ View current cluster status. + corosync + View current membership information as seen by corosync. + .TP ++quorum ++View current quorum status. ++.TP ++qdevice <device model> [\fB\-\-full\fR] [<cluster name>] ++Show runtime status of specified model of quorum device provider. Using \fB\-\-full\fR will give more detailed output. If <cluster name> is specified, only information about the specified cluster will be displayed. ++.TP + nodes [corosync|both|config] + View current status of nodes from pacemaker. If 'corosync' is specified, print nodes currently configured in corosync, if 'both' is specified, print nodes from both corosync & pacemaker. 
If 'config' is specified, print nodes from corosync & pacemaker configuration. + .TP +diff --git a/pcs/quorum.py b/pcs/quorum.py +index 2d54ed7..a849282 100644 +--- a/pcs/quorum.py ++++ b/pcs/quorum.py +@@ -8,10 +8,11 @@ from __future__ import ( + import sys + + from pcs import ( ++ prop, ++ stonith, + usage, + utils, + ) +-from pcs.cluster import cluster_quorum_unblock + from pcs.cli.common import parse_args + from pcs.cli.common.console_report import indent + from pcs.cli.common.errors import CmdLineInputError +@@ -19,10 +20,10 @@ from pcs.lib.errors import LibraryError + + def quorum_cmd(lib, argv, modificators): + if len(argv) < 1: +- usage.quorum() +- sys.exit(1) ++ sub_cmd, argv_next = "config", [] ++ else: ++ sub_cmd, argv_next = argv[0], argv[1:] + +- sub_cmd, argv_next = argv[0], argv[1:] + try: + if sub_cmd == "help": + usage.quorum(argv) +@@ -35,7 +36,8 @@ def quorum_cmd(lib, argv, modificators): + elif sub_cmd == "device": + quorum_device_cmd(lib, argv_next, modificators) + elif sub_cmd == "unblock": +- cluster_quorum_unblock(argv_next) ++ # TODO switch to new architecture ++ quorum_unblock_cmd(argv_next) + elif sub_cmd == "update": + quorum_update_cmd(lib, argv_next, modificators) + else: +@@ -185,3 +187,58 @@ def quorum_device_update_cmd(lib, argv, modificators): + force_options=modificators["force"], + skip_offline_nodes=modificators["skip_offline_nodes"] + ) ++ ++# TODO switch to new architecture, move to lib ++def quorum_unblock_cmd(argv): ++ if len(argv) > 0: ++ usage.quorum(["unblock"]) ++ sys.exit(1) ++ ++ if utils.is_rhel6(): ++ utils.err("operation is not supported on CMAN clusters") ++ ++ output, retval = utils.run( ++ ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"] ++ ) ++ if retval != 0: ++ utils.err("unable to check quorum status") ++ if output.split("=")[-1].strip() != "1": ++ utils.err("cluster is not waiting for nodes to establish quorum") ++ ++ unjoined_nodes = ( ++ set(utils.getNodesFromCorosyncConf()) ++ - ++ 
set(utils.getCorosyncActiveNodes()) ++ ) ++ if not unjoined_nodes: ++ utils.err("no unjoined nodes found") ++ if "--force" not in utils.pcs_options: ++ answer = utils.get_terminal_input( ++ ( ++ "WARNING: If node(s) {nodes} are not powered off or they do" ++ + " have access to shared resources, data corruption and/or" ++ + " cluster failure may occur. Are you sure you want to" ++ + " continue? [y/N] " ++ ).format(nodes=", ".join(unjoined_nodes)) ++ ) ++ if answer.lower() not in ["y", "yes"]: ++ print("Canceled") ++ return ++ for node in unjoined_nodes: ++ stonith.stonith_confirm([node], skip_question=True) ++ ++ output, retval = utils.run( ++ ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"] ++ ) ++ if retval != 0: ++ utils.err("unable to cancel waiting for nodes") ++ print("Quorum unblocked") ++ ++ startup_fencing = prop.get_set_properties().get("startup-fencing", "") ++ utils.set_cib_property( ++ "startup-fencing", ++ "false" if startup_fencing.lower() != "false" else "true" ++ ) ++ utils.set_cib_property("startup-fencing", startup_fencing) ++ print("Waiting for nodes canceled") ++ +diff --git a/pcs/status.py b/pcs/status.py +index e1f367f..bdfcc85 100644 +--- a/pcs/status.py ++++ b/pcs/status.py +@@ -13,6 +13,9 @@ from pcs import ( + usage, + utils, + ) ++from pcs.qdevice import qdevice_status_cmd ++from pcs.quorum import quorum_status_cmd ++from pcs.cli.common.errors import CmdLineInputError + from pcs.lib.errors import LibraryError + from pcs.lib.pacemaker_state import ClusterState + +@@ -38,6 +41,28 @@ def status_cmd(argv): + xml_status() + elif (sub_cmd == "corosync"): + corosync_status() ++ elif sub_cmd == "qdevice": ++ try: ++ qdevice_status_cmd( ++ utils.get_library_wrapper(), ++ argv, ++ utils.get_modificators() ++ ) ++ except LibraryError as e: ++ utils.process_library_reports(e.args) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "status", sub_cmd) ++ elif sub_cmd == "quorum": ++ try: ++ quorum_status_cmd( 
++ utils.get_library_wrapper(), ++ argv, ++ utils.get_modificators() ++ ) ++ except LibraryError as e: ++ utils.process_library_reports(e.args) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "status", sub_cmd) + else: + usage.status() + sys.exit(1) +diff --git a/pcs/usage.py b/pcs/usage.py +index 77b496e..0605cd7 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1118,6 +1118,14 @@ Commands: + corosync + View current membership information as seen by corosync. + ++ quorum ++ View current quorum status. ++ ++ qdevice <device model> [--full] [<cluster name>] ++ Show runtime status of specified model of quorum device provider. Using ++ --full will give more detailed output. If <cluster name> is specified, ++ only information about the specified cluster will be displayed. ++ + nodes [corosync|both|config] + View current status of nodes from pacemaker. If 'corosync' is + specified, print nodes currently configured in corosync, if 'both' +@@ -1322,7 +1330,7 @@ Usage: pcs quorum <command> + Manage cluster quorum settings. + + Commands: +- config ++ [config] + Show quorum configuration. 
+ + status +-- +1.8.3.1 + diff --git a/SOURCES/bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch b/SOURCES/bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch new file mode 100644 index 0000000..1642614 --- /dev/null +++ b/SOURCES/bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch @@ -0,0 +1,4078 @@ +From bc599f0f30c039a72540002d9a41a93c15626837 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Wed, 14 Sep 2016 09:04:57 +0200 +Subject: [PATCH] squash bz1158805 Add support for qdevice/qnetd pro + +9c7f37ef37bb lib: do not merge external processes' stdout and stderr + +db3ada5e27aa warn on stopping/destroying currently used qdevice + +18df73397b54 handle SBD when removing qdevice from a cluster + +766f86954b46 Allow to re-run "cluster node add" if failed due to qdevice +--- + pcs/cluster.py | 44 ++-- + pcs/common/report_codes.py | 6 +- + pcs/common/tools.py | 3 + + pcs/lib/booth/status.py | 27 +- + pcs/lib/booth/test/test_status.py | 26 +- + pcs/lib/cib/tools.py | 7 +- + pcs/lib/commands/booth.py | 5 +- + pcs/lib/commands/qdevice.py | 35 ++- + pcs/lib/commands/quorum.py | 12 +- + pcs/lib/commands/test/test_booth.py | 4 +- + pcs/lib/corosync/live.py | 26 +- + pcs/lib/corosync/qdevice_client.py | 9 +- + pcs/lib/corosync/qdevice_net.py | 77 ++++-- + pcs/lib/external.py | 105 +++++--- + pcs/lib/pacemaker.py | 71 ++++-- + pcs/lib/reports.py | 97 ++++---- + pcs/lib/resource_agent.py | 31 ++- + pcs/lib/sbd.py | 4 +- + pcs/qdevice.py | 4 +- + pcs/test/resources/corosync-qdevice.conf | 34 +++ + pcs/test/test_common_tools.py | 32 +++ + pcs/test/test_lib_cib_tools.py | 10 +- + pcs/test/test_lib_commands_qdevice.py | 155 +++++++++++- + pcs/test/test_lib_commands_quorum.py | 105 +++++++- + pcs/test/test_lib_corosync_live.py | 30 ++- + pcs/test/test_lib_corosync_qdevice_client.py | 8 +- + pcs/test/test_lib_corosync_qdevice_net.py | 110 +++++--- + pcs/test/test_lib_external.py | 167 +++++++------ + 
pcs/test/test_lib_pacemaker.py | 359 ++++++++++++++++++--------- + pcs/test/test_lib_resource_agent.py | 39 ++- + pcs/test/test_lib_sbd.py | 12 +- + 31 files changed, 1166 insertions(+), 488 deletions(-) + create mode 100644 pcs/test/resources/corosync-qdevice.conf + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 577e08e..e5ad1ec 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -1414,7 +1414,6 @@ def cluster_node(argv): + "cluster is not configured for RRP, " + "you must not specify ring 1 address for the node" + ) +- corosync_conf = None + (canAdd, error) = utils.canAddNodeToCluster(node0) + if not canAdd: + utils.err("Unable to add '%s' to cluster: %s" % (node0, error)) +@@ -1422,7 +1421,29 @@ def cluster_node(argv): + report_processor = lib_env.report_processor + node_communicator = lib_env.node_communicator() + node_addr = NodeAddresses(node0, node1) ++ ++ # First set up everything else than corosync. Once the new node is ++ # present in corosync.conf / cluster.conf, it's considered part of a ++ # cluster and the node add command cannot be run again. So we need to ++ # minimize the amout of actions (and therefore possible failures) after ++ # adding the node to corosync. 
+ try: ++ # qdevice setup ++ if not utils.is_rhel6(): ++ conf_facade = corosync_conf_facade.from_string( ++ utils.getCorosyncConf() ++ ) ++ qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings() ++ if qdevice_model == "net": ++ _add_device_model_net( ++ lib_env, ++ qdevice_model_options["host"], ++ conf_facade.get_cluster_name(), ++ [node_addr], ++ skip_offline_nodes=False ++ ) ++ ++ # sbd setup + if lib_sbd.is_sbd_enabled(utils.cmd_runner()): + if "--watchdog" not in utils.pcs_options: + watchdog = settings.sbd_watchdog_default +@@ -1463,6 +1484,7 @@ def cluster_node(argv): + report_processor, node_communicator, node_addr + ) + ++ # booth setup + booth_sync.send_all_config_to_node( + node_communicator, + report_processor, +@@ -1477,6 +1499,8 @@ def cluster_node(argv): + [node_communicator_exception_to_report_item(e)] + ) + ++ # Now add the new node to corosync.conf / cluster.conf ++ corosync_conf = None + for my_node in utils.getNodesFromCorosyncConf(): + retval, output = utils.addLocalNode(my_node, node0, node1) + if retval != 0: +@@ -1512,24 +1536,6 @@ def cluster_node(argv): + except: + utils.err('Unable to communicate with pcsd') + +- # set qdevice-net certificates if needed +- if not utils.is_rhel6(): +- try: +- conf_facade = corosync_conf_facade.from_string( +- corosync_conf +- ) +- qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings() +- if qdevice_model == "net": +- _add_device_model_net( +- lib_env, +- qdevice_model_options["host"], +- conf_facade.get_cluster_name(), +- [node_addr], +- skip_offline_nodes=False +- ) +- except LibraryError as e: +- process_library_reports(e.args) +- + print("Setting up corosync...") + utils.setCorosyncConfig(node0, corosync_conf) + if "--enable" in utils.pcs_options: +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index e6a86ec..23e931f 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -8,17 +8,18 @@ from __future__ 
import ( + # force cathegories + FORCE_ACTIVE_RRP = "ACTIVE_RRP" + FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE" +-FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB" + FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY" ++FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB" + FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE" + FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE" + FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE" + FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD" ++FORCE_METADATA_ISSUE = "METADATA_ISSUE" + FORCE_OPTIONS = "OPTIONS" + FORCE_QDEVICE_MODEL = "QDEVICE_MODEL" ++FORCE_QDEVICE_USED = "QDEVICE_USED" + FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT" + FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT" +-FORCE_METADATA_ISSUE = "METADATA_ISSUE" + SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES" + SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG" + +@@ -135,6 +136,7 @@ QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED" + QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED" + QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED" + QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED" ++QDEVICE_USED_BY_CLUSTERS = "QDEVICE_USED_BY_CLUSTERS" + REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING" + RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR" + RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING' +diff --git a/pcs/common/tools.py b/pcs/common/tools.py +index 275f6b9..01194a5 100644 +--- a/pcs/common/tools.py ++++ b/pcs/common/tools.py +@@ -38,3 +38,6 @@ def format_environment_error(e): + if e.filename: + return "{0}: '{1}'".format(e.strerror, e.filename) + return e.strerror ++ ++def join_multilines(strings): ++ return "\n".join([a.strip() for a in strings if a.strip()]) +diff --git a/pcs/lib/booth/status.py b/pcs/lib/booth/status.py +index 4b93161..87cdc05 100644 +--- a/pcs/lib/booth/status.py ++++ b/pcs/lib/booth/status.py +@@ -6,6 +6,7 @@ from 
__future__ import ( + ) + + from pcs import settings ++from pcs.common.tools import join_multilines + from pcs.lib.booth import reports + from pcs.lib.errors import LibraryError + +@@ -14,28 +15,36 @@ def get_daemon_status(runner, name=None): + cmd = [settings.booth_binary, "status"] + if name: + cmd += ["-c", name] +- output, return_value = runner.run(cmd) ++ stdout, stderr, return_value = runner.run(cmd) + # 7 means that there is no booth instance running + if return_value not in [0, 7]: +- raise LibraryError(reports.booth_daemon_status_error(output)) +- return output ++ raise LibraryError( ++ reports.booth_daemon_status_error(join_multilines([stderr, stdout])) ++ ) ++ return stdout + + + def get_tickets_status(runner, name=None): + cmd = [settings.booth_binary, "list"] + if name: + cmd += ["-c", name] +- output, return_value = runner.run(cmd) ++ stdout, stderr, return_value = runner.run(cmd) + if return_value != 0: +- raise LibraryError(reports.booth_tickets_status_error(output)) +- return output ++ raise LibraryError( ++ reports.booth_tickets_status_error( ++ join_multilines([stderr, stdout]) ++ ) ++ ) ++ return stdout + + + def get_peers_status(runner, name=None): + cmd = [settings.booth_binary, "peers"] + if name: + cmd += ["-c", name] +- output, return_value = runner.run(cmd) ++ stdout, stderr, return_value = runner.run(cmd) + if return_value != 0: +- raise LibraryError(reports.booth_peers_status_error(output)) +- return output ++ raise LibraryError( ++ reports.booth_peers_status_error(join_multilines([stderr, stdout])) ++ ) ++ return stdout +diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py +index d47ffca..dfb7354 100644 +--- a/pcs/lib/booth/test/test_status.py ++++ b/pcs/lib/booth/test/test_status.py +@@ -30,34 +30,34 @@ class GetDaemonStatusTest(TestCase): + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + + def test_no_name(self): +- self.mock_run.run.return_value = ("output", 0) ++ self.mock_run.run.return_value 
= ("output", "", 0) + self.assertEqual("output", lib.get_daemon_status(self.mock_run)) + self.mock_run.run.assert_called_once_with( + [settings.booth_binary, "status"] + ) + + def test_with_name(self): +- self.mock_run.run.return_value = ("output", 0) ++ self.mock_run.run.return_value = ("output", "", 0) + self.assertEqual("output", lib.get_daemon_status(self.mock_run, "name")) + self.mock_run.run.assert_called_once_with( + [settings.booth_binary, "status", "-c", "name"] + ) + + def test_daemon_not_running(self): +- self.mock_run.run.return_value = ("", 7) ++ self.mock_run.run.return_value = ("", "error", 7) + self.assertEqual("", lib.get_daemon_status(self.mock_run)) + self.mock_run.run.assert_called_once_with( + [settings.booth_binary, "status"] + ) + + def test_failure(self): +- self.mock_run.run.return_value = ("out", 1) ++ self.mock_run.run.return_value = ("out", "error", 1) + assert_raise_library_error( + lambda: lib.get_daemon_status(self.mock_run), + ( + Severities.ERROR, + report_codes.BOOTH_DAEMON_STATUS_ERROR, +- {"reason": "out"} ++ {"reason": "error\nout"} + ) + ) + self.mock_run.run.assert_called_once_with( +@@ -70,14 +70,14 @@ class GetTicketsStatusTest(TestCase): + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + + def test_no_name(self): +- self.mock_run.run.return_value = ("output", 0) ++ self.mock_run.run.return_value = ("output", "", 0) + self.assertEqual("output", lib.get_tickets_status(self.mock_run)) + self.mock_run.run.assert_called_once_with( + [settings.booth_binary, "list"] + ) + + def test_with_name(self): +- self.mock_run.run.return_value = ("output", 0) ++ self.mock_run.run.return_value = ("output", "", 0) + self.assertEqual( + "output", lib.get_tickets_status(self.mock_run, "name") + ) +@@ -86,14 +86,14 @@ class GetTicketsStatusTest(TestCase): + ) + + def test_failure(self): +- self.mock_run.run.return_value = ("out", 1) ++ self.mock_run.run.return_value = ("out", "error", 1) + assert_raise_library_error( + lambda: 
lib.get_tickets_status(self.mock_run), + ( + Severities.ERROR, + report_codes.BOOTH_TICKET_STATUS_ERROR, + { +- "reason": "out" ++ "reason": "error\nout" + } + ) + ) +@@ -107,28 +107,28 @@ class GetPeersStatusTest(TestCase): + self.mock_run = mock.MagicMock(spec_set=CommandRunner) + + def test_no_name(self): +- self.mock_run.run.return_value = ("output", 0) ++ self.mock_run.run.return_value = ("output", "", 0) + self.assertEqual("output", lib.get_peers_status(self.mock_run)) + self.mock_run.run.assert_called_once_with( + [settings.booth_binary, "peers"] + ) + + def test_with_name(self): +- self.mock_run.run.return_value = ("output", 0) ++ self.mock_run.run.return_value = ("output", "", 0) + self.assertEqual("output", lib.get_peers_status(self.mock_run, "name")) + self.mock_run.run.assert_called_once_with( + [settings.booth_binary, "peers", "-c", "name"] + ) + + def test_failure(self): +- self.mock_run.run.return_value = ("out", 1) ++ self.mock_run.run.return_value = ("out", "error", 1) + assert_raise_library_error( + lambda: lib.get_peers_status(self.mock_run), + ( + Severities.ERROR, + report_codes.BOOTH_PEERS_STATUS_ERROR, + { +- "reason": "out" ++ "reason": "error\nout" + } + ) + ) +diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py +index 8141360..6285931 100644 +--- a/pcs/lib/cib/tools.py ++++ b/pcs/lib/cib/tools.py +@@ -11,6 +11,7 @@ import tempfile + from lxml import etree + + from pcs import settings ++from pcs.common.tools import join_multilines + from pcs.lib import reports + from pcs.lib.errors import LibraryError + from pcs.lib.pacemaker_values import validate_id +@@ -181,7 +182,7 @@ def upgrade_cib(cib, runner): + temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs") + temp_file.write(etree.tostring(cib).decode()) + temp_file.flush() +- output, retval = runner.run( ++ stdout, stderr, retval = runner.run( + [ + os.path.join(settings.pacemaker_binaries, "cibadmin"), + "--upgrade", +@@ -192,7 +193,9 @@ def upgrade_cib(cib, runner): + + if 
retval != 0: + temp_file.close() +- raise LibraryError(reports.cib_upgrade_failed(output)) ++ raise LibraryError( ++ reports.cib_upgrade_failed(join_multilines([stderr, stdout])) ++ ) + + temp_file.seek(0) + return etree.fromstring(temp_file.read()) +diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py +index 7a3d348..bea966c 100644 +--- a/pcs/lib/commands/booth.py ++++ b/pcs/lib/commands/booth.py +@@ -10,6 +10,7 @@ import os.path + from functools import partial + + from pcs import settings ++from pcs.common.tools import join_multilines + from pcs.lib import external, reports + from pcs.lib.booth import ( + config_exchange, +@@ -185,7 +186,7 @@ def ticket_operation(operation, env, name, ticket, site_ip): + ) + site_ip = site_ip_list[0] + +- command_output, return_code = env.cmd_runner().run([ ++ stdout, stderr, return_code = env.cmd_runner().run([ + settings.booth_binary, operation, + "-s", site_ip, + ticket +@@ -195,7 +196,7 @@ def ticket_operation(operation, env, name, ticket, site_ip): + raise LibraryError( + booth_reports.booth_ticket_operation_failed( + operation, +- command_output, ++ join_multilines([stderr, stdout]), + site_ip, + ticket + ) +diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py +index 1d1d85f..ca0ae86 100644 +--- a/pcs/lib/commands/qdevice.py ++++ b/pcs/lib/commands/qdevice.py +@@ -8,9 +8,10 @@ from __future__ import ( + import base64 + import binascii + ++from pcs.common import report_codes + from pcs.lib import external, reports + from pcs.lib.corosync import qdevice_net +-from pcs.lib.errors import LibraryError ++from pcs.lib.errors import LibraryError, ReportItemSeverity + + + def qdevice_setup(lib_env, model, enable, start): +@@ -31,13 +32,20 @@ def qdevice_setup(lib_env, model, enable, start): + if start: + _service_start(lib_env, qdevice_net.qdevice_start) + +-def qdevice_destroy(lib_env, model): ++def qdevice_destroy(lib_env, model, proceed_if_used=False): + """ + Stop and disable qdevice on local host 
and remove its configuration + string model qdevice model to destroy ++ bool procced_if_used destroy qdevice even if it is used by clusters + """ + _ensure_not_cman(lib_env) + _check_model(model) ++ _check_qdevice_not_used( ++ lib_env.report_processor, ++ lib_env.cmd_runner(), ++ model, ++ proceed_if_used ++ ) + _service_stop(lib_env, qdevice_net.qdevice_stop) + _service_disable(lib_env, qdevice_net.qdevice_disable) + qdevice_net.qdevice_destroy() +@@ -83,12 +91,20 @@ def qdevice_start(lib_env, model): + _check_model(model) + _service_start(lib_env, qdevice_net.qdevice_start) + +-def qdevice_stop(lib_env, model): ++def qdevice_stop(lib_env, model, proceed_if_used=False): + """ + stop qdevice now on local host ++ string model qdevice model to destroy ++ bool procced_if_used stop qdevice even if it is used by clusters + """ + _ensure_not_cman(lib_env) + _check_model(model) ++ _check_qdevice_not_used( ++ lib_env.report_processor, ++ lib_env.cmd_runner(), ++ model, ++ proceed_if_used ++ ) + _service_stop(lib_env, qdevice_net.qdevice_stop) + + def qdevice_kill(lib_env, model): +@@ -176,6 +192,19 @@ def _check_model(model): + reports.invalid_option_value("model", model, ["net"]) + ) + ++def _check_qdevice_not_used(reporter, runner, model, force=False): ++ _check_model(model) ++ connected_clusters = [] ++ if model == "net": ++ status = qdevice_net.qdevice_status_cluster_text(runner) ++ connected_clusters = qdevice_net.qdevice_connected_clusters(status) ++ if connected_clusters: ++ reporter.process(reports.qdevice_used_by_clusters( ++ connected_clusters, ++ ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR, ++ None if force else report_codes.FORCE_QDEVICE_USED ++ )) ++ + def _service_start(lib_env, func): + lib_env.report_processor.process( + reports.service_start_started("quorum device") +diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py +index 7fb7bb4..8390fc6 100644 +--- a/pcs/lib/commands/quorum.py ++++ b/pcs/lib/commands/quorum.py 
+@@ -283,14 +283,23 @@ def remove_device(lib_env, skip_offline_nodes=False): + cfg = lib_env.get_corosync_conf() + model, dummy_options, dummy_options = cfg.get_quorum_device_settings() + cfg.remove_quorum_device() ++ ++ if lib_env.is_corosync_conf_live: ++ # fix quorum options for SBD to work properly ++ if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg): ++ lib_env.report_processor.process(reports.sbd_requires_atb()) ++ cfg.set_quorum_options( ++ lib_env.report_processor, {"auto_tie_breaker": "1"} ++ ) ++ + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + + if lib_env.is_corosync_conf_live: ++ communicator = lib_env.node_communicator() + # disable qdevice + lib_env.report_processor.process( + reports.service_disable_started("corosync-qdevice") + ) +- communicator = lib_env.node_communicator() + parallel_nodes_communication_helper( + qdevice_client.remote_client_disable, + [ +@@ -304,7 +313,6 @@ def remove_device(lib_env, skip_offline_nodes=False): + lib_env.report_processor.process( + reports.service_stop_started("corosync-qdevice") + ) +- communicator = lib_env.node_communicator() + parallel_nodes_communication_helper( + qdevice_client.remote_client_stop, + [ +diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py +index 08d2c79..6bcab2b 100644 +--- a/pcs/lib/commands/test/test_booth.py ++++ b/pcs/lib/commands/test/test_booth.py +@@ -520,7 +520,7 @@ class TicketOperationTest(TestCase): + ) + + def test_raises_when_command_fail(self): +- mock_run = mock.Mock(return_value=("some message", 1)) ++ mock_run = mock.Mock(return_value=("some message", "error", 1)) + mock_env = mock.MagicMock( + cmd_runner=mock.Mock(return_value=mock.MagicMock(run=mock_run)) + ) +@@ -533,7 +533,7 @@ class TicketOperationTest(TestCase): + report_codes.BOOTH_TICKET_OPERATION_FAILED, + { + "operation": "grant", +- "reason": "some message", ++ "reason": "error\nsome message", + "site_ip": "1.2.3.4", + "ticket_name": "ABC", + } +diff --git 
a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py +index 1e68c31..67aa0e4 100644 +--- a/pcs/lib/corosync/live.py ++++ b/pcs/lib/corosync/live.py +@@ -8,6 +8,7 @@ from __future__ import ( + import os.path + + from pcs import settings ++from pcs.common.tools import join_multilines + from pcs.lib import reports + from pcs.lib.errors import LibraryError + from pcs.lib.external import NodeCommunicator +@@ -41,42 +42,39 @@ def reload_config(runner): + """ + Ask corosync to reload its configuration + """ +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + os.path.join(settings.corosync_binaries, "corosync-cfgtool"), + "-R" + ]) +- if retval != 0 or "invalid option" in output: +- raise LibraryError( +- reports.corosync_config_reload_error(output.rstrip()) +- ) ++ message = join_multilines([stderr, stdout]) ++ if retval != 0 or "invalid option" in message: ++ raise LibraryError(reports.corosync_config_reload_error(message)) + + def get_quorum_status_text(runner): + """ + Get runtime quorum status from the local node + """ +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + os.path.join(settings.corosync_binaries, "corosync-quorumtool"), + "-p" + ]) + # retval is 0 on success if node is not in partition with quorum + # retval is 1 on error OR on success if node has quorum +- if retval not in [0, 1]: +- raise LibraryError( +- reports.corosync_quorum_get_status_error(output) +- ) +- return output ++ if retval not in [0, 1] or stderr.strip(): ++ raise LibraryError(reports.corosync_quorum_get_status_error(stderr)) ++ return stdout + + def set_expected_votes(runner, votes): + """ + set expected votes in live cluster to specified value + """ +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + os.path.join(settings.corosync_binaries, "corosync-quorumtool"), + # format votes to handle the case where they are int + "-e", "{0}".format(votes) + ]) + if retval != 0: + raise LibraryError( +- 
reports.corosync_quorum_set_expected_votes_error(output) ++ reports.corosync_quorum_set_expected_votes_error(stderr) + ) +- return output ++ return stdout +diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py +index 98fbb0e..c9d0095 100644 +--- a/pcs/lib/corosync/qdevice_client.py ++++ b/pcs/lib/corosync/qdevice_client.py +@@ -8,6 +8,7 @@ from __future__ import ( + import os.path + + from pcs import settings ++from pcs.common.tools import join_multilines + from pcs.lib import reports + from pcs.lib.errors import LibraryError + +@@ -23,12 +24,14 @@ def get_status_text(runner, verbose=False): + ] + if verbose: + cmd.append("-v") +- output, retval = runner.run(cmd) ++ stdout, stderr, retval = runner.run(cmd) + if retval != 0: + raise LibraryError( +- reports.corosync_quorum_get_status_error(output) ++ reports.corosync_quorum_get_status_error( ++ join_multilines([stderr, stdout]) ++ ) + ) +- return output ++ return stdout + + def remote_client_enable(reporter, node_communicator, node): + """ +diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py +index 4054592..200e45a 100644 +--- a/pcs/lib/corosync/qdevice_net.py ++++ b/pcs/lib/corosync/qdevice_net.py +@@ -15,6 +15,7 @@ import shutil + import tempfile + + from pcs import settings ++from pcs.common.tools import join_multilines + from pcs.lib import external, reports + from pcs.lib.errors import LibraryError + +@@ -41,12 +42,15 @@ def qdevice_setup(runner): + if external.is_dir_nonempty(settings.corosync_qdevice_net_server_certs_dir): + raise LibraryError(reports.qdevice_already_initialized(__model)) + +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + __qnetd_certutil, "-i" + ]) + if retval != 0: + raise LibraryError( +- reports.qdevice_initialization_error(__model, output.rstrip()) ++ reports.qdevice_initialization_error( ++ __model, ++ join_multilines([stderr, stdout]) ++ ) + ) + + def qdevice_initialized(): +@@ -78,10 +82,15 @@ def 
qdevice_status_generic_text(runner, verbose=False): + cmd = [__qnetd_tool, "-s"] + if verbose: + cmd.append("-v") +- output, retval = runner.run(cmd) ++ stdout, stderr, retval = runner.run(cmd) + if retval != 0: +- raise LibraryError(reports.qdevice_get_status_error(__model, output)) +- return output ++ raise LibraryError( ++ reports.qdevice_get_status_error( ++ __model, ++ join_multilines([stderr, stdout]) ++ ) ++ ) ++ return stdout + + def qdevice_status_cluster_text(runner, cluster=None, verbose=False): + """ +@@ -94,10 +103,24 @@ def qdevice_status_cluster_text(runner, cluster=None, verbose=False): + cmd.append("-v") + if cluster: + cmd.extend(["-c", cluster]) +- output, retval = runner.run(cmd) ++ stdout, stderr, retval = runner.run(cmd) + if retval != 0: +- raise LibraryError(reports.qdevice_get_status_error(__model, output)) +- return output ++ raise LibraryError( ++ reports.qdevice_get_status_error( ++ __model, ++ join_multilines([stderr, stdout]) ++ ) ++ ) ++ return stdout ++ ++def qdevice_connected_clusters(status_cluster_text): ++ connected_clusters = [] ++ regexp = re.compile(r'^Cluster "(?P<cluster>[^"]+)":$') ++ for line in status_cluster_text.splitlines(): ++ match = regexp.search(line) ++ if match: ++ connected_clusters.append(match.group("cluster")) ++ return connected_clusters + + def qdevice_enable(runner): + """ +@@ -143,17 +166,19 @@ def qdevice_sign_certificate_request(runner, cert_request, cluster_name): + reports.qdevice_certificate_sign_error + ) + # sign the request +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name + ]) + tmpfile.close() # temp file is deleted on close + if retval != 0: + raise LibraryError( +- reports.qdevice_certificate_sign_error(output.strip()) ++ reports.qdevice_certificate_sign_error( ++ join_multilines([stderr, stdout]) ++ ) + ) + # get signed certificate, corosync tool only works with files + return _get_output_certificate( +- 
output, ++ stdout, + reports.qdevice_certificate_sign_error + ) + +@@ -181,12 +206,15 @@ def client_setup(runner, ca_certificate): + reports.qdevice_initialization_error(__model, e.strerror) + ) + # initialize client's certificate storage +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + __qdevice_certutil, "-i", "-c", ca_file_path + ]) + if retval != 0: + raise LibraryError( +- reports.qdevice_initialization_error(__model, output.rstrip()) ++ reports.qdevice_initialization_error( ++ __model, ++ join_multilines([stderr, stdout]) ++ ) + ) + + def client_initialized(): +@@ -217,15 +245,18 @@ def client_generate_certificate_request(runner, cluster_name): + """ + if not client_initialized(): + raise LibraryError(reports.qdevice_not_initialized(__model)) +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + __qdevice_certutil, "-r", "-n", cluster_name + ]) + if retval != 0: + raise LibraryError( +- reports.qdevice_initialization_error(__model, output.rstrip()) ++ reports.qdevice_initialization_error( ++ __model, ++ join_multilines([stderr, stdout]) ++ ) + ) + return _get_output_certificate( +- output, ++ stdout, + functools.partial(reports.qdevice_initialization_error, __model) + ) + +@@ -243,17 +274,19 @@ def client_cert_request_to_pk12(runner, cert_request): + reports.qdevice_certificate_import_error + ) + # transform it +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + __qdevice_certutil, "-M", "-c", tmpfile.name + ]) + tmpfile.close() # temp file is deleted on close + if retval != 0: + raise LibraryError( +- reports.qdevice_certificate_import_error(output) ++ reports.qdevice_certificate_import_error( ++ join_multilines([stderr, stdout]) ++ ) + ) + # get resulting pk12, corosync tool only works with files + return _get_output_certificate( +- output, ++ stdout, + reports.qdevice_certificate_import_error + ) + +@@ -268,13 +301,15 @@ def client_import_certificate_and_key(runner, pk12_certificate): 
+ pk12_certificate, + reports.qdevice_certificate_import_error + ) +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + __qdevice_certutil, "-m", "-c", tmpfile.name + ]) + tmpfile.close() # temp file is deleted on close + if retval != 0: + raise LibraryError( +- reports.qdevice_certificate_import_error(output) ++ reports.qdevice_certificate_import_error( ++ join_multilines([stderr, stdout]) ++ ) + ) + + def remote_qdevice_get_ca_certificate(node_communicator, host): +diff --git a/pcs/lib/external.py b/pcs/lib/external.py +index 08bf2bb..074d2aa 100644 +--- a/pcs/lib/external.py ++++ b/pcs/lib/external.py +@@ -47,14 +47,15 @@ except ImportError: + URLError as urllib_URLError + ) + +-from pcs.lib import reports +-from pcs.lib.errors import LibraryError, ReportItemSeverity ++from pcs import settings + from pcs.common import report_codes + from pcs.common.tools import ( ++ join_multilines, + simple_cache, + run_parallel as tools_run_parallel, + ) +-from pcs import settings ++from pcs.lib import reports ++from pcs.lib.errors import LibraryError, ReportItemSeverity + + + class ManageServiceError(Exception): +@@ -138,13 +139,17 @@ def disable_service(runner, service, instance=None): + if not is_service_installed(runner, service): + return + if is_systemctl(): +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + "systemctl", "disable", _get_service_name(service, instance) + ]) + else: +- output, retval = runner.run(["chkconfig", service, "off"]) ++ stdout, stderr, retval = runner.run(["chkconfig", service, "off"]) + if retval != 0: +- raise DisableServiceError(service, output.rstrip(), instance) ++ raise DisableServiceError( ++ service, ++ join_multilines([stderr, stdout]), ++ instance ++ ) + + + def enable_service(runner, service, instance=None): +@@ -158,13 +163,17 @@ def enable_service(runner, service, instance=None): + If None no instance name will be used. 
+ """ + if is_systemctl(): +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + "systemctl", "enable", _get_service_name(service, instance) + ]) + else: +- output, retval = runner.run(["chkconfig", service, "on"]) ++ stdout, stderr, retval = runner.run(["chkconfig", service, "on"]) + if retval != 0: +- raise EnableServiceError(service, output.rstrip(), instance) ++ raise EnableServiceError( ++ service, ++ join_multilines([stderr, stdout]), ++ instance ++ ) + + + def start_service(runner, service, instance=None): +@@ -176,13 +185,17 @@ def start_service(runner, service, instance=None): + If None no instance name will be used. + """ + if is_systemctl(): +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + "systemctl", "start", _get_service_name(service, instance) + ]) + else: +- output, retval = runner.run(["service", service, "start"]) ++ stdout, stderr, retval = runner.run(["service", service, "start"]) + if retval != 0: +- raise StartServiceError(service, output.rstrip(), instance) ++ raise StartServiceError( ++ service, ++ join_multilines([stderr, stdout]), ++ instance ++ ) + + + def stop_service(runner, service, instance=None): +@@ -194,13 +207,17 @@ def stop_service(runner, service, instance=None): + If None no instance name will be used. 
+ """ + if is_systemctl(): +- output, retval = runner.run([ ++ stdout, stderr, retval = runner.run([ + "systemctl", "stop", _get_service_name(service, instance) + ]) + else: +- output, retval = runner.run(["service", service, "stop"]) ++ stdout, stderr, retval = runner.run(["service", service, "stop"]) + if retval != 0: +- raise StopServiceError(service, output.rstrip(), instance) ++ raise StopServiceError( ++ service, ++ join_multilines([stderr, stdout]), ++ instance ++ ) + + + def kill_services(runner, services): +@@ -210,15 +227,16 @@ def kill_services(runner, services): + iterable services service names + """ + # make killall not report that a process is not running +- output, retval = runner.run( ++ stdout, stderr, retval = runner.run( + ["killall", "--quiet", "--signal", "9", "--"] + list(services) + ) + # If a process isn't running, killall will still return 1 even with --quiet. + # We don't consider that an error, so we check for output string as well. + # If it's empty, no actuall error happened. 
+ if retval != 0: +- if output.strip(): +- raise KillServicesError(list(services), output.rstrip()) ++ message = join_multilines([stderr, stdout]) ++ if message: ++ raise KillServicesError(list(services), message) + + + def is_service_enabled(runner, service, instance=None): +@@ -229,11 +247,11 @@ def is_service_enabled(runner, service, instance=None): + service -- name of service + """ + if is_systemctl(): +- _, retval = runner.run( ++ dummy_stdout, dummy_stderr, retval = runner.run( + ["systemctl", "is-enabled", _get_service_name(service, instance)] + ) + else: +- _, retval = runner.run(["chkconfig", service]) ++ dummy_stdout, dummy_stderr, retval = runner.run(["chkconfig", service]) + + return retval == 0 + +@@ -246,13 +264,15 @@ def is_service_running(runner, service, instance=None): + service -- name of service + """ + if is_systemctl(): +- _, retval = runner.run([ ++ dummy_stdout, dummy_stderr, retval = runner.run([ + "systemctl", + "is-active", + _get_service_name(service, instance) + ]) + else: +- _, retval = runner.run(["service", service, "status"]) ++ dummy_stdout, dummy_stderr, retval = runner.run( ++ ["service", service, "status"] ++ ) + + return retval == 0 + +@@ -279,12 +299,12 @@ def get_non_systemd_services(runner): + if is_systemctl(): + return [] + +- output, return_code = runner.run(["chkconfig"], ignore_stderr=True) ++ stdout, dummy_stderr, return_code = runner.run(["chkconfig"]) + if return_code != 0: + return [] + + service_list = [] +- for service in output.splitlines(): ++ for service in stdout.splitlines(): + service = service.split(" ", 1)[0] + if service: + service_list.append(service) +@@ -300,12 +320,14 @@ def get_systemd_services(runner): + if not is_systemctl(): + return [] + +- output, return_code = runner.run(["systemctl", "list-unit-files", "--full"]) ++ stdout, dummy_stderr, return_code = runner.run([ ++ "systemctl", "list-unit-files", "--full" ++ ]) + if return_code != 0: + return [] + + service_list = [] +- for service in 
output.splitlines(): ++ for service in stdout.splitlines(): + match = re.search(r'^([\S]*)\.service', service) + if match: + service_list.append(match.group(1)) +@@ -322,13 +344,13 @@ def is_cman_cluster(runner): + # - corosync1 runs with cman on rhel6 + # - corosync1 can be used without cman, but we don't support it anyways + # - corosync2 is the default result if errors occur +- output, retval = runner.run([ ++ stdout, dummy_stderr, retval = runner.run([ + os.path.join(settings.corosync_binaries, "corosync"), + "-v" + ]) + if retval != 0: + return False +- match = re.search(r"version\D+(\d+)", output) ++ match = re.search(r"version\D+(\d+)", stdout) + return match is not None and match.group(1) == "1" + + +@@ -340,8 +362,7 @@ class CommandRunner(object): + self._python2 = sys.version[0] == "2" + + def run( +- self, args, ignore_stderr=False, stdin_string=None, env_extend=None, +- binary_output=False ++ self, args, stdin_string=None, env_extend=None, binary_output=False + ): + #Reset environment variables by empty dict is desired here. 
We need to + #get rid of defaults - we do not know the context and environment of the +@@ -364,9 +385,7 @@ class CommandRunner(object): + # Some commands react differently if they get anything via stdin + stdin=(subprocess.PIPE if stdin_string is not None else None), + stdout=subprocess.PIPE, +- stderr=( +- subprocess.PIPE if ignore_stderr else subprocess.STDOUT +- ), ++ stderr=subprocess.PIPE, + preexec_fn=( + lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL) + ), +@@ -376,7 +395,7 @@ class CommandRunner(object): + # decodes newlines and in python3 also converts bytes to str + universal_newlines=(not self._python2 and not binary_output) + ) +- output, dummy_stderror = process.communicate(stdin_string) ++ out_std, out_err = process.communicate(stdin_string) + retval = process.returncode + except OSError as e: + raise LibraryError( +@@ -386,13 +405,19 @@ class CommandRunner(object): + self._logger.debug( + ( + "Finished running: {args}\nReturn value: {retval}" +- + "\n--Debug Output Start--\n{output}\n--Debug Output End--" +- ).format(args=log_args, retval=retval, output=output) +- ) +- self._reporter.process( +- reports.run_external_process_finished(log_args, retval, output) ++ + "\n--Debug Stdout Start--\n{out_std}\n--Debug Stdout End--" ++ + "\n--Debug Stderr Start--\n{out_err}\n--Debug Stderr End--" ++ ).format( ++ args=log_args, ++ retval=retval, ++ out_std=out_std, ++ out_err=out_err ++ ) + ) +- return output, retval ++ self._reporter.process(reports.run_external_process_finished( ++ log_args, retval, out_std, out_err ++ )) ++ return out_std, out_err, retval + + + class NodeCommunicationException(Exception): +diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker.py +index fd6f97b..6747b22 100644 +--- a/pcs/lib/pacemaker.py ++++ b/pcs/lib/pacemaker.py +@@ -9,6 +9,7 @@ import os.path + from lxml import etree + + from pcs import settings ++from pcs.common.tools import join_multilines + from pcs.lib import reports + from pcs.lib.errors import LibraryError + 
from pcs.lib.pacemaker_state import ClusterState +@@ -26,28 +27,33 @@ def __exec(name): + return os.path.join(settings.pacemaker_binaries, name) + + def get_cluster_status_xml(runner): +- output, retval = runner.run( ++ stdout, stderr, retval = runner.run( + [__exec("crm_mon"), "--one-shot", "--as-xml", "--inactive"] + ) + if retval != 0: + raise CrmMonErrorException( +- reports.cluster_state_cannot_load(retval, output) ++ reports.cluster_state_cannot_load(join_multilines([stderr, stdout])) + ) +- return output ++ return stdout + + def get_cib_xml(runner, scope=None): + command = [__exec("cibadmin"), "--local", "--query"] + if scope: + command.append("--scope={0}".format(scope)) +- output, retval = runner.run(command) ++ stdout, stderr, retval = runner.run(command) + if retval != 0: + if retval == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT and scope: + raise LibraryError( +- reports.cib_load_error_scope_missing(scope, retval, output) ++ reports.cib_load_error_scope_missing( ++ scope, ++ join_multilines([stderr, stdout]) ++ ) + ) + else: +- raise LibraryError(reports.cib_load_error(retval, output)) +- return output ++ raise LibraryError( ++ reports.cib_load_error(join_multilines([stderr, stdout])) ++ ) ++ return stdout + + def get_cib(xml): + try: +@@ -59,9 +65,9 @@ def replace_cib_configuration_xml(runner, xml, cib_upgraded=False): + cmd = [__exec("cibadmin"), "--replace", "--verbose", "--xml-pipe"] + if not cib_upgraded: + cmd += ["--scope", "configuration"] +- output, retval = runner.run(cmd, stdin_string=xml) ++ stdout, stderr, retval = runner.run(cmd, stdin_string=xml) + if retval != 0: +- raise LibraryError(reports.cib_push_error(retval, output)) ++ raise LibraryError(reports.cib_push_error(stderr, stdout)) + + def replace_cib_configuration(runner, tree, cib_upgraded=False): + #etree returns bytes: b'xml' +@@ -108,13 +114,18 @@ def resource_cleanup(runner, resource=None, node=None, force=False): + if node: + cmd.extend(["--node", node]) + +- output, retval = 
runner.run(cmd) ++ stdout, stderr, retval = runner.run(cmd) + + if retval != 0: + raise LibraryError( +- reports.resource_cleanup_error(retval, output, resource, node) ++ reports.resource_cleanup_error( ++ join_multilines([stderr, stdout]), ++ resource, ++ node ++ ) + ) +- return output ++ # usefull output (what has been done) goes to stderr ++ return join_multilines([stdout, stderr]) + + def nodes_standby(runner, node_list=None, all_nodes=False): + return __nodes_standby_unstandby(runner, True, node_list, all_nodes) +@@ -124,8 +135,11 @@ def nodes_unstandby(runner, node_list=None, all_nodes=False): + + def has_resource_wait_support(runner): + # returns 1 on success so we don't care about retval +- output, dummy_retval = runner.run([__exec("crm_resource"), "-?"]) +- return "--wait" in output ++ stdout, stderr, dummy_retval = runner.run( ++ [__exec("crm_resource"), "-?"] ++ ) ++ # help goes to stderr but we check stdout as well if that gets changed ++ return "--wait" in stderr or "--wait" in stdout + + def ensure_resource_wait_support(runner): + if not has_resource_wait_support(runner): +@@ -135,15 +149,22 @@ def wait_for_resources(runner, timeout=None): + args = [__exec("crm_resource"), "--wait"] + if timeout is not None: + args.append("--timeout={0}".format(timeout)) +- output, retval = runner.run(args) ++ stdout, stderr, retval = runner.run(args) + if retval != 0: ++ # Usefull info goes to stderr - not only error messages, a list of ++ # pending actions in case of timeout goes there as well. ++ # We use stdout just to be sure if that's get changed. 
+ if retval == __EXITCODE_WAIT_TIMEOUT: + raise LibraryError( +- reports.resource_wait_timed_out(retval, output.strip()) ++ reports.resource_wait_timed_out( ++ join_multilines([stderr, stdout]) ++ ) + ) + else: + raise LibraryError( +- reports.resource_wait_error(retval, output.strip()) ++ reports.resource_wait_error( ++ join_multilines([stderr, stdout]) ++ ) + ) + + def __nodes_standby_unstandby( +@@ -178,9 +199,11 @@ def __nodes_standby_unstandby( + cmd_list.append(cmd_template) + report = [] + for cmd in cmd_list: +- output, retval = runner.run(cmd) ++ stdout, stderr, retval = runner.run(cmd) + if retval != 0: +- report.append(reports.common_error(output)) ++ report.append( ++ reports.common_error(join_multilines([stderr, stdout])) ++ ) + if report: + raise LibraryError(*report) + +@@ -189,21 +212,23 @@ def __get_local_node_name(runner): + # but it returns false names when cluster is not running (or we are on + # a remote node). Getting node id first is reliable since it fails in those + # cases. 
+- output, retval = runner.run([__exec("crm_node"), "--cluster-id"]) ++ stdout, dummy_stderr, retval = runner.run( ++ [__exec("crm_node"), "--cluster-id"] ++ ) + if retval != 0: + raise LibraryError( + reports.pacemaker_local_node_name_not_found("node id not found") + ) +- node_id = output.strip() ++ node_id = stdout.strip() + +- output, retval = runner.run( ++ stdout, dummy_stderr, retval = runner.run( + [__exec("crm_node"), "--name-for-id={0}".format(node_id)] + ) + if retval != 0: + raise LibraryError( + reports.pacemaker_local_node_name_not_found("node name not found") + ) +- node_name = output.strip() ++ node_name = stdout.strip() + + if node_name == "(null)": + raise LibraryError( +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index a701679..b9e9a66 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -262,21 +262,24 @@ def run_external_process_started(command, stdin): + } + ) + +-def run_external_process_finished(command, retval, stdout): ++def run_external_process_finished(command, retval, stdout, stderr): + """ + information about result of running an external process + command string the external process command + retval external process's return (exit) code + stdout string external process's stdout ++ stderr string external process's stderr + """ + return ReportItem.debug( + report_codes.RUN_EXTERNAL_PROCESS_FINISHED, + "Finished running: {command}\nReturn value: {return_value}" +- + "\n--Debug Output Start--\n{stdout}\n--Debug Output End--\n", ++ + "\n--Debug Stdout Start--\n{stdout}\n--Debug Stdout End--" ++ + "\n--Debug Stderr Start--\n{stderr}\n--Debug Stderr End--\n", + info={ + "command": command, + "return_value": retval, + "stdout": stdout, ++ "stderr": stderr, + } + ) + +@@ -854,6 +857,23 @@ def qdevice_get_status_error(model, reason): + } + ) + ++def qdevice_used_by_clusters( ++ clusters, severity=ReportItemSeverity.ERROR, forceable=None ++): ++ """ ++ Qdevice is currently being used by clusters, cannot stop it unless forced 
++ """ ++ return ReportItem( ++ report_codes.QDEVICE_USED_BY_CLUSTERS, ++ severity, ++ "Quorum device is currently being used by cluster(s): {clusters_str}", ++ info={ ++ "clusters": clusters, ++ "clusters_str": ", ".join(clusters), ++ }, ++ forceable=forceable ++ ) ++ + def cman_unsupported_command(): + """ + requested library command is not available as local cluster is CMAN based +@@ -903,35 +923,31 @@ def resource_does_not_exist(resource_id): + } + ) + +-def cib_load_error(retval, stdout): ++def cib_load_error(reason): + """ + cannot load cib from cibadmin, cibadmin exited with non-zero code +- retval external process's return (exit) code +- stdout string external process's stdout ++ string reason error description + """ + return ReportItem.error( + report_codes.CIB_LOAD_ERROR, + "unable to get cib", + info={ +- "return_value": retval, +- "stdout": stdout, ++ "reason": reason, + } + ) + +-def cib_load_error_scope_missing(scope, retval, stdout): ++def cib_load_error_scope_missing(scope, reason): + """ + cannot load cib from cibadmin, specified scope is missing in the cib + scope string requested cib scope +- retval external process's return (exit) code +- stdout string external process's stdout ++ string reason error description + """ + return ReportItem.error( + report_codes.CIB_LOAD_ERROR_SCOPE_MISSING, + "unable to get cib, scope '{scope}' not present in cib", + info={ + "scope": scope, +- "return_value": retval, +- "stdout": stdout, ++ "reason": reason, + } + ) + +@@ -957,33 +973,31 @@ def cib_missing_mandatory_section(section_name): + } + ) + +-def cib_push_error(retval, stdout): ++def cib_push_error(reason, pushed_cib): + """ + cannot push cib to cibadmin, cibadmin exited with non-zero code +- retval external process's return (exit) code +- stdout string external process's stdout ++ string reason error description ++ string pushed_cib cib which failed to be pushed + """ + return ReportItem.error( + report_codes.CIB_PUSH_ERROR, +- "Unable to update 
cib\n{stdout}", ++ "Unable to update cib\n{reason}\n{pushed_cib}", + info={ +- "return_value": retval, +- "stdout": stdout, ++ "reason": reason, ++ "pushed_cib": pushed_cib, + } + ) + +-def cluster_state_cannot_load(retval, stdout): ++def cluster_state_cannot_load(reason): + """ + cannot load cluster status from crm_mon, crm_mon exited with non-zero code +- retval external process's return (exit) code +- stdout string external process's stdout ++ string reason error description + """ + return ReportItem.error( + report_codes.CRM_MON_ERROR, + "error running crm_mon, is pacemaker running?", + info={ +- "return_value": retval, +- "stdout": stdout, ++ "reason": reason, + } + ) + +@@ -1005,57 +1019,50 @@ def resource_wait_not_supported(): + "crm_resource does not support --wait, please upgrade pacemaker" + ) + +-def resource_wait_timed_out(retval, stdout): ++def resource_wait_timed_out(reason): + """ + waiting for resources (crm_resource --wait) failed, timeout expired +- retval external process's return (exit) code +- stdout string external process's stdout ++ string reason error description + """ + return ReportItem.error( + report_codes.RESOURCE_WAIT_TIMED_OUT, +- "waiting timeout\n\n{stdout}", ++ "waiting timeout\n\n{reason}", + info={ +- "return_value": retval, +- "stdout": stdout, ++ "reason": reason, + } + ) + +-def resource_wait_error(retval, stdout): ++def resource_wait_error(reason): + """ + waiting for resources (crm_resource --wait) failed +- retval external process's return (exit) code +- stdout string external process's stdout ++ string reason error description + """ + return ReportItem.error( + report_codes.RESOURCE_WAIT_ERROR, +- "{stdout}", ++ "{reason}", + info={ +- "return_value": retval, +- "stdout": stdout, ++ "reason": reason, + } + ) + +-def resource_cleanup_error(retval, stdout, resource=None, node=None): ++def resource_cleanup_error(reason, resource=None, node=None): + """ + an error occured when deleting resource history in pacemaker +- retval 
external process's return (exit) code +- stdout string external process's stdout +- resource string resource which has been cleaned up +- node string node which has been cleaned up ++ string reason error description ++ string resource resource which has been cleaned up ++ string node node which has been cleaned up + """ + if resource: +- text = "Unable to cleanup resource: {resource}\n{stdout}" ++ text = "Unable to cleanup resource: {resource}\n{reason}" + else: + text = ( +- "Unexpected error occured. 'crm_resource -C' err_code: " +- + "{return_value}\n{stdout}" ++ "Unexpected error occured. 'crm_resource -C' error:\n{reason}" + ) + return ReportItem.error( + report_codes.RESOURCE_CLEANUP_ERROR, + text, + info={ +- "return_value": retval, +- "stdout": stdout, ++ "reason": reason, + "resource": resource, + "node": node, + } +diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py +index ea93875..d49b5c0 100644 +--- a/pcs/lib/resource_agent.py ++++ b/pcs/lib/resource_agent.py +@@ -125,14 +125,14 @@ def _get_pcmk_advanced_stonith_parameters(runner): + """ + @simple_cache + def __get_stonithd_parameters(): +- output, retval = runner.run( +- [settings.stonithd_binary, "metadata"], ignore_stderr=True ++ stdout, stderr, dummy_retval = runner.run( ++ [settings.stonithd_binary, "metadata"] + ) +- if output.strip() == "": +- raise UnableToGetAgentMetadata("stonithd", output) ++ if stdout.strip() == "": ++ raise UnableToGetAgentMetadata("stonithd", stderr) + + try: +- params = _get_agent_parameters(etree.fromstring(output)) ++ params = _get_agent_parameters(etree.fromstring(stdout)) + for param in params: + param["longdesc"] = "{0}\n{1}".format( + param["shortdesc"], param["longdesc"] +@@ -166,15 +166,15 @@ def get_fence_agent_metadata(runner, fence_agent): + ): + raise AgentNotFound(fence_agent) + +- output, retval = runner.run( +- [script_path, "-o", "metadata"], ignore_stderr=True ++ stdout, stderr, dummy_retval = runner.run( ++ [script_path, "-o", "metadata"] 
+ ) + +- if output.strip() == "": +- raise UnableToGetAgentMetadata(fence_agent, output) ++ if stdout.strip() == "": ++ raise UnableToGetAgentMetadata(fence_agent, stderr) + + try: +- return etree.fromstring(output) ++ return etree.fromstring(stdout) + except etree.XMLSyntaxError as e: + raise UnableToGetAgentMetadata(fence_agent, str(e)) + +@@ -219,17 +219,16 @@ def _get_ocf_resource_agent_metadata(runner, provider, agent): + if not __is_path_abs(script_path) or not is_path_runnable(script_path): + raise AgentNotFound(agent_name) + +- output, retval = runner.run( ++ stdout, stderr, dummy_retval = runner.run( + [script_path, "meta-data"], +- env_extend={"OCF_ROOT": settings.ocf_root}, +- ignore_stderr=True ++ env_extend={"OCF_ROOT": settings.ocf_root} + ) + +- if output.strip() == "": +- raise UnableToGetAgentMetadata(agent_name, output) ++ if stdout.strip() == "": ++ raise UnableToGetAgentMetadata(agent_name, stderr) + + try: +- return etree.fromstring(output) ++ return etree.fromstring(stdout) + except etree.XMLSyntaxError as e: + raise UnableToGetAgentMetadata(agent_name, str(e)) + +diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py +index 39de740..9b57400 100644 +--- a/pcs/lib/sbd.py ++++ b/pcs/lib/sbd.py +@@ -115,11 +115,11 @@ def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0): + node. 
+ """ + return ( ++ not corosync_conf_facade.is_enabled_auto_tie_breaker() ++ and + is_auto_tie_breaker_needed( + runner, corosync_conf_facade, node_number_modifier + ) +- and +- not corosync_conf_facade.is_enabled_auto_tie_breaker() + ) + + +diff --git a/pcs/qdevice.py b/pcs/qdevice.py +index 0037704..2591bae 100644 +--- a/pcs/qdevice.py ++++ b/pcs/qdevice.py +@@ -92,7 +92,7 @@ def qdevice_destroy_cmd(lib, argv, modifiers): + if len(argv) != 1: + raise CmdLineInputError() + model = argv[0] +- lib.qdevice.destroy(model) ++ lib.qdevice.destroy(model, modifiers["force"]) + + def qdevice_start_cmd(lib, argv, modifiers): + if len(argv) != 1: +@@ -104,7 +104,7 @@ def qdevice_stop_cmd(lib, argv, modifiers): + if len(argv) != 1: + raise CmdLineInputError() + model = argv[0] +- lib.qdevice.stop(model) ++ lib.qdevice.stop(model, modifiers["force"]) + + def qdevice_kill_cmd(lib, argv, modifiers): + if len(argv) != 1: +diff --git a/pcs/test/resources/corosync-qdevice.conf b/pcs/test/resources/corosync-qdevice.conf +new file mode 100644 +index 0000000..38998e7 +--- /dev/null ++++ b/pcs/test/resources/corosync-qdevice.conf +@@ -0,0 +1,34 @@ ++totem { ++ version: 2 ++ secauth: off ++ cluster_name: test99 ++ transport: udpu ++} ++ ++nodelist { ++ node { ++ ring0_addr: rh7-1 ++ nodeid: 1 ++ } ++ ++ node { ++ ring0_addr: rh7-2 ++ nodeid: 2 ++ } ++} ++ ++quorum { ++ provider: corosync_votequorum ++ ++ device { ++ model: net ++ ++ net { ++ host: 127.0.0.1 ++ } ++ } ++} ++ ++logging { ++ to_syslog: yes ++} +diff --git a/pcs/test/test_common_tools.py b/pcs/test/test_common_tools.py +index 5290e6d..d9b6af3 100644 +--- a/pcs/test/test_common_tools.py ++++ b/pcs/test/test_common_tools.py +@@ -63,3 +63,35 @@ class RunParallelTestCase(TestCase): + elapsed_time = finish_time - start_time + self.assertTrue(elapsed_time > x) + self.assertTrue(elapsed_time < sum([i + 1 for i in range(x)])) ++ ++ ++class JoinMultilinesTest(TestCase): ++ def test_empty_input(self): ++ self.assertEqual( ++ "", ++ 
tools.join_multilines([]) ++ ) ++ ++ def test_two_strings(self): ++ self.assertEqual( ++ "a\nb", ++ tools.join_multilines(["a", "b"]) ++ ) ++ ++ def test_strip(self): ++ self.assertEqual( ++ "a\nb", ++ tools.join_multilines([" a\n", " b\n"]) ++ ) ++ ++ def test_skip_empty(self): ++ self.assertEqual( ++ "a\nb", ++ tools.join_multilines([" a\n", " \n", " b\n"]) ++ ) ++ ++ def test_multiline(self): ++ self.assertEqual( ++ "a\nA\nb\nB", ++ tools.join_multilines(["a\nA\n", "b\nB\n"]) ++ ) +diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py +index ffc2642..ec9c312 100644 +--- a/pcs/test/test_lib_cib_tools.py ++++ b/pcs/test/test_lib_cib_tools.py +@@ -383,7 +383,7 @@ class UpgradeCibTest(TestCase): + mock_file.name = "mock_file_name" + mock_file.read.return_value = "<cib/>" + mock_named_file.return_value = mock_file +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + assert_xml_equal( + "<cib/>", + etree.tostring( +@@ -408,13 +408,15 @@ class UpgradeCibTest(TestCase): + mock_file = mock.MagicMock() + mock_file.name = "mock_file_name" + mock_named_file.return_value = mock_file +- self.mock_runner.run.return_value = ("reason", 1) ++ self.mock_runner.run.return_value = ("some info", "some error", 1) + assert_raise_library_error( + lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner), + ( + severities.ERROR, + report_codes.CIB_UPGRADE_FAILED, +- {"reason": "reason"} ++ { ++ "reason": "some error\nsome info", ++ } + ) + ) + mock_named_file.assert_called_once_with("w+", suffix=".pcs") +@@ -434,7 +436,7 @@ class UpgradeCibTest(TestCase): + mock_file.name = "mock_file_name" + mock_file.read.return_value = "not xml" + mock_named_file.return_value = mock_file +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + assert_raise_library_error( + lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner), + ( +diff --git 
a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py +index 10841e9..756afa8 100644 +--- a/pcs/test/test_lib_commands_qdevice.py ++++ b/pcs/test/test_lib_commands_qdevice.py +@@ -345,6 +345,7 @@ class QdeviceNetSetupTest(QdeviceTestCase): + ) + + ++@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text") + @mock.patch("pcs.lib.external.stop_service") + @mock.patch("pcs.lib.external.disable_service") + @mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_destroy") +@@ -355,7 +356,11 @@ class QdeviceNetSetupTest(QdeviceTestCase): + lambda self: "mock_runner" + ) + class QdeviceNetDestroyTest(QdeviceTestCase): +- def test_success(self, mock_net_destroy, mock_net_disable, mock_net_stop): ++ def test_success_not_used( ++ self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status ++ ): ++ mock_status.return_value = "" ++ + lib.qdevice_destroy(self.lib_env, "net") + + mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd") +@@ -398,9 +403,85 @@ class QdeviceNetDestroyTest(QdeviceTestCase): + ] + ) + ++ def test_success_used_forced( ++ self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status ++ ): ++ mock_status.return_value = 'Cluster "a_cluster":\n' ++ ++ lib.qdevice_destroy(self.lib_env, "net", proceed_if_used=True) ++ ++ mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd") ++ mock_net_disable.assert_called_once_with( ++ "mock_runner", ++ "corosync-qnetd" ++ ) ++ mock_net_destroy.assert_called_once_with() ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.WARNING, ++ report_codes.QDEVICE_USED_BY_CLUSTERS, ++ { ++ "clusters": ["a_cluster"], ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_STOP_STARTED, ++ { ++ "service": "quorum device", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_STOP_SUCCESS, ++ { ++ "service": "quorum device", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_DISABLE_SUCCESS, 
++ { ++ "service": "quorum device", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.QDEVICE_DESTROY_SUCCESS, ++ { ++ "model": "net", ++ } ++ ) ++ ] ++ ) ++ ++ def test_used_not_forced( ++ self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status ++ ): ++ mock_status.return_value = 'Cluster "a_cluster":\n' ++ ++ assert_raise_library_error( ++ lambda: lib.qdevice_destroy(self.lib_env, "net"), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_USED_BY_CLUSTERS, ++ { ++ "clusters": ["a_cluster"], ++ }, ++ report_codes.FORCE_QDEVICE_USED ++ ), ++ ) ++ ++ mock_net_stop.assert_not_called() ++ mock_net_disable.assert_not_called() ++ mock_net_destroy.assert_not_called() ++ + def test_stop_failed( +- self, mock_net_destroy, mock_net_disable, mock_net_stop ++ self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status + ): ++ mock_status.return_value = "" + mock_net_stop.side_effect = StopServiceError( + "test service", + "test error" +@@ -435,8 +516,9 @@ class QdeviceNetDestroyTest(QdeviceTestCase): + ) + + def test_disable_failed( +- self, mock_net_destroy, mock_net_disable, mock_net_stop ++ self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status + ): ++ mock_status.return_value = "" + mock_net_disable.side_effect = DisableServiceError( + "test service", + "test error" +@@ -481,8 +563,9 @@ class QdeviceNetDestroyTest(QdeviceTestCase): + ) + + def test_destroy_failed( +- self, mock_net_destroy, mock_net_disable, mock_net_stop ++ self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status + ): ++ mock_status.return_value = "" + mock_net_destroy.side_effect = LibraryError("mock_report_item") + + self.assertRaises( +@@ -755,6 +838,7 @@ class QdeviceNetStartTest(QdeviceTestCase): + ) + + ++@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text") + @mock.patch("pcs.lib.external.stop_service") + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) + @mock.patch.object( +@@ -763,13 +847,49 @@ class 
QdeviceNetStartTest(QdeviceTestCase): + lambda self: "mock_runner" + ) + class QdeviceNetStopTest(QdeviceTestCase): +- def test_success(self, mock_net_stop): +- lib.qdevice_stop(self.lib_env, "net") ++ def test_success_not_used(self, mock_net_stop, mock_status): ++ mock_status.return_value = "" ++ ++ lib.qdevice_stop(self.lib_env, "net", proceed_if_used=False) ++ ++ mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd") ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.INFO, ++ report_codes.SERVICE_STOP_STARTED, ++ { ++ "service": "quorum device", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_STOP_SUCCESS, ++ { ++ "service": "quorum device", ++ } ++ ) ++ ] ++ ) ++ ++ def test_success_used_forced(self, mock_net_stop, mock_status): ++ mock_status.return_value = 'Cluster "a_cluster":\n' ++ ++ lib.qdevice_stop(self.lib_env, "net", proceed_if_used=True) ++ + mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd") + assert_report_item_list_equal( + self.mock_reporter.report_item_list, + [ + ( ++ severity.WARNING, ++ report_codes.QDEVICE_USED_BY_CLUSTERS, ++ { ++ "clusters": ["a_cluster"], ++ } ++ ), ++ ( + severity.INFO, + report_codes.SERVICE_STOP_STARTED, + { +@@ -786,7 +906,28 @@ class QdeviceNetStopTest(QdeviceTestCase): + ] + ) + +- def test_failed(self, mock_net_stop): ++ def test_used_not_forced(self, mock_net_stop, mock_status): ++ mock_status.return_value = 'Cluster "a_cluster":\n' ++ ++ assert_raise_library_error( ++ lambda: lib.qdevice_stop( ++ self.lib_env, ++ "net", ++ proceed_if_used=False ++ ), ++ ( ++ severity.ERROR, ++ report_codes.QDEVICE_USED_BY_CLUSTERS, ++ { ++ "clusters": ["a_cluster"], ++ }, ++ report_codes.FORCE_QDEVICE_USED ++ ), ++ ) ++ mock_net_stop.assert_not_called() ++ ++ def test_failed(self, mock_net_stop, mock_status): ++ mock_status.return_value = "" + mock_net_stop.side_effect = StopServiceError( + "test service", + "test error" +diff --git 
a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py +index d7701af..1487eb4 100644 +--- a/pcs/test/test_lib_commands_quorum.py ++++ b/pcs/test/test_lib_commands_quorum.py +@@ -1579,10 +1579,14 @@ class RemoveDeviceTest(TestCase, CmanMixin): + mock_remote_stop.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_success( ++ @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True) ++ @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True) ++ def test_success_3nodes_sbd( + self, mock_remote_stop, mock_remote_disable, mock_remove_net, + mock_get_corosync, mock_push_corosync + ): ++ # nothing special needs to be done in regards of SBD if a cluster ++ # consists of odd number of nodes + original_conf = open(rc("corosync-3nodes-qdevice.conf")).read() + no_device_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf +@@ -1619,10 +1623,106 @@ class RemoveDeviceTest(TestCase, CmanMixin): + self.assertEqual(3, len(mock_remote_stop.mock_calls)) + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_success_file( ++ @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: False) ++ @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: False) ++ def test_success_2nodes_no_sbd( ++ self, mock_remote_stop, mock_remote_disable, mock_remove_net, ++ mock_get_corosync, mock_push_corosync ++ ): ++ # cluster consists of two nodes, two_node must be set ++ original_conf = open(rc("corosync-qdevice.conf")).read() ++ no_device_conf = open(rc("corosync.conf")).read() ++ mock_get_corosync.return_value = original_conf ++ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ ++ lib.remove_device(lib_env) ++ ++ self.assertEqual(1, len(mock_push_corosync.mock_calls)) ++ ac( ++ mock_push_corosync.mock_calls[0][1][0].config.export(), ++ no_device_conf ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ 
( ++ severity.INFO, ++ report_codes.SERVICE_DISABLE_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_STOP_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ] ++ ) ++ self.assertEqual(1, len(mock_remove_net.mock_calls)) ++ self.assertEqual(2, len(mock_remote_disable.mock_calls)) ++ self.assertEqual(2, len(mock_remote_stop.mock_calls)) ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True) ++ @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True) ++ def test_success_2nodes_sbd( + self, mock_remote_stop, mock_remote_disable, mock_remove_net, + mock_get_corosync, mock_push_corosync + ): ++ # cluster consists of two nodes, but SBD is in use ++ # auto tie breaker must be enabled ++ original_conf = open(rc("corosync-qdevice.conf")).read() ++ no_device_conf = open(rc("corosync.conf")).read().replace( ++ "two_node: 1", ++ "auto_tie_breaker: 1" ++ ) ++ mock_get_corosync.return_value = original_conf ++ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ ++ lib.remove_device(lib_env) ++ ++ self.assertEqual(1, len(mock_push_corosync.mock_calls)) ++ ac( ++ mock_push_corosync.mock_calls[0][1][0].config.export(), ++ no_device_conf ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severity.WARNING, ++ report_codes.SBD_REQUIRES_ATB, ++ {} ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_DISABLE_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ( ++ severity.INFO, ++ report_codes.SERVICE_STOP_STARTED, ++ { ++ "service": "corosync-qdevice", ++ } ++ ), ++ ] ++ ) ++ self.assertEqual(1, len(mock_remove_net.mock_calls)) ++ self.assertEqual(2, len(mock_remote_disable.mock_calls)) ++ self.assertEqual(2, len(mock_remote_stop.mock_calls)) ++ ++ @mock.patch("pcs.lib.sbd.atb_has_to_be_enabled") ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def 
test_success_file( ++ self, mock_atb_check, mock_remote_stop, mock_remote_disable, ++ mock_remove_net, mock_get_corosync, mock_push_corosync ++ ): + original_conf = open(rc("corosync-3nodes-qdevice.conf")).read() + no_device_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf +@@ -1643,6 +1743,7 @@ class RemoveDeviceTest(TestCase, CmanMixin): + mock_remove_net.assert_not_called() + mock_remote_disable.assert_not_called() + mock_remote_stop.assert_not_called() ++ mock_atb_check.assert_not_called() + + + @mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy") +diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py +index 3173195..f03d78b 100644 +--- a/pcs/test/test_lib_corosync_live.py ++++ b/pcs/test/test_lib_corosync_live.py +@@ -69,9 +69,10 @@ class ReloadConfigTest(TestCase): + + def test_success(self): + cmd_retval = 0 +- cmd_output = "cmd output" ++ cmd_stdout = "cmd output" ++ cmd_stderr = "" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (cmd_output, cmd_retval) ++ mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval) + + lib.reload_config(mock_runner) + +@@ -81,9 +82,10 @@ class ReloadConfigTest(TestCase): + + def test_error(self): + cmd_retval = 1 +- cmd_output = "cmd output" ++ cmd_stdout = "cmd output" ++ cmd_stderr = "cmd error" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (cmd_output, cmd_retval) ++ mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval) + + assert_raise_library_error( + lambda: lib.reload_config(mock_runner), +@@ -91,7 +93,7 @@ class ReloadConfigTest(TestCase): + severity.ERROR, + report_codes.COROSYNC_CONFIG_RELOAD_ERROR, + { +- "reason": cmd_output, ++ "reason": "\n".join([cmd_stderr, cmd_stdout]), + } + ) + ) +@@ -107,7 +109,7 @@ class GetQuorumStatusTextTest(TestCase): + self.quorum_tool = "/usr/sbin/corosync-quorumtool" + + def 
test_success(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.get_quorum_status_text(self.mock_runner) +@@ -117,7 +119,7 @@ class GetQuorumStatusTextTest(TestCase): + ]) + + def test_success_with_retval_1(self): +- self.mock_runner.run.return_value = ("status info", 1) ++ self.mock_runner.run.return_value = ("status info", "", 1) + self.assertEqual( + "status info", + lib.get_quorum_status_text(self.mock_runner) +@@ -127,7 +129,7 @@ class GetQuorumStatusTextTest(TestCase): + ]) + + def test_error(self): +- self.mock_runner.run.return_value = ("status error", 2) ++ self.mock_runner.run.return_value = ("some info", "status error", 2) + assert_raise_library_error( + lambda: lib.get_quorum_status_text(self.mock_runner), + ( +@@ -152,9 +154,10 @@ class SetExpectedVotesTest(TestCase): + + def test_success(self): + cmd_retval = 0 +- cmd_output = "cmd output" ++ cmd_stdout = "cmd output" ++ cmd_stderr = "" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (cmd_output, cmd_retval) ++ mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval) + + lib.set_expected_votes(mock_runner, 3) + +@@ -164,9 +167,10 @@ class SetExpectedVotesTest(TestCase): + + def test_error(self): + cmd_retval = 1 +- cmd_output = "cmd output" ++ cmd_stdout = "cmd output" ++ cmd_stderr = "cmd stderr" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (cmd_output, cmd_retval) ++ mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval) + + assert_raise_library_error( + lambda: lib.set_expected_votes(mock_runner, 3), +@@ -174,7 +178,7 @@ class SetExpectedVotesTest(TestCase): + severity.ERROR, + report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR, + { +- "reason": cmd_output, ++ "reason": cmd_stderr, + } + ) + ) +diff --git a/pcs/test/test_lib_corosync_qdevice_client.py 
b/pcs/test/test_lib_corosync_qdevice_client.py +index 0b5bd67..8c32c36 100644 +--- a/pcs/test/test_lib_corosync_qdevice_client.py ++++ b/pcs/test/test_lib_corosync_qdevice_client.py +@@ -23,7 +23,7 @@ class GetStatusTextTest(TestCase): + self.qdevice_tool = "/usr/sbin/corosync-qdevice-tool" + + def test_success(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.get_status_text(self.mock_runner) +@@ -33,7 +33,7 @@ class GetStatusTextTest(TestCase): + ]) + + def test_success_verbose(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.get_status_text(self.mock_runner, True) +@@ -43,14 +43,14 @@ class GetStatusTextTest(TestCase): + ]) + + def test_error(self): +- self.mock_runner.run.return_value = ("status error", 1) ++ self.mock_runner.run.return_value = ("some info", "status error", 1) + assert_raise_library_error( + lambda: lib.get_status_text(self.mock_runner), + ( + severity.ERROR, + report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR, + { +- "reason": "status error", ++ "reason": "status error\nsome info", + } + ) + ) +diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py +index 340a8dc..21c526b 100644 +--- a/pcs/test/test_lib_corosync_qdevice_net.py ++++ b/pcs/test/test_lib_corosync_qdevice_net.py +@@ -49,7 +49,7 @@ class QdeviceSetupTest(TestCase): + + def test_success(self, mock_is_dir_nonempty): + mock_is_dir_nonempty.return_value = False +- self.mock_runner.run.return_value = ("initialized", 0) ++ self.mock_runner.run.return_value = ("initialized", "", 0) + + lib.qdevice_setup(self.mock_runner) + +@@ -73,7 +73,7 @@ class QdeviceSetupTest(TestCase): + + def test_init_tool_fail(self, mock_is_dir_nonempty): + mock_is_dir_nonempty.return_value = False +- self.mock_runner.run.return_value = 
("test error", 1) ++ self.mock_runner.run.return_value = ("stdout", "test error", 1) + + assert_raise_library_error( + lambda: lib.qdevice_setup(self.mock_runner), +@@ -82,7 +82,7 @@ class QdeviceSetupTest(TestCase): + report_codes.QDEVICE_INITIALIZATION_ERROR, + { + "model": "net", +- "reason": "test error", ++ "reason": "test error\nstdout", + } + ) + ) +@@ -126,7 +126,7 @@ class QdeviceStatusGenericTest(TestCase): + self.mock_runner = mock.MagicMock(spec_set=CommandRunner) + + def test_success(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.qdevice_status_generic_text(self.mock_runner) +@@ -134,7 +134,7 @@ class QdeviceStatusGenericTest(TestCase): + self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"]) + + def test_success_verbose(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.qdevice_status_generic_text(self.mock_runner, True) +@@ -142,7 +142,7 @@ class QdeviceStatusGenericTest(TestCase): + self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s", "-v"]) + + def test_error(self): +- self.mock_runner.run.return_value = ("status error", 1) ++ self.mock_runner.run.return_value = ("some info", "status error", 1) + assert_raise_library_error( + lambda: lib.qdevice_status_generic_text(self.mock_runner), + ( +@@ -150,7 +150,7 @@ class QdeviceStatusGenericTest(TestCase): + report_codes.QDEVICE_GET_STATUS_ERROR, + { + "model": "net", +- "reason": "status error", ++ "reason": "status error\nsome info", + } + ) + ) +@@ -162,7 +162,7 @@ class QdeviceStatusClusterTest(TestCase): + self.mock_runner = mock.MagicMock(spec_set=CommandRunner) + + def test_success(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status 
info", + lib.qdevice_status_cluster_text(self.mock_runner) +@@ -170,7 +170,7 @@ class QdeviceStatusClusterTest(TestCase): + self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"]) + + def test_success_verbose(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.qdevice_status_cluster_text(self.mock_runner, verbose=True) +@@ -178,7 +178,7 @@ class QdeviceStatusClusterTest(TestCase): + self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l", "-v"]) + + def test_success_cluster(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.qdevice_status_cluster_text(self.mock_runner, "cluster") +@@ -188,7 +188,7 @@ class QdeviceStatusClusterTest(TestCase): + ]) + + def test_success_cluster_verbose(self): +- self.mock_runner.run.return_value = ("status info", 0) ++ self.mock_runner.run.return_value = ("status info", "", 0) + self.assertEqual( + "status info", + lib.qdevice_status_cluster_text(self.mock_runner, "cluster", True) +@@ -198,7 +198,7 @@ class QdeviceStatusClusterTest(TestCase): + ]) + + def test_error(self): +- self.mock_runner.run.return_value = ("status error", 1) ++ self.mock_runner.run.return_value = ("some info", "status error", 1) + assert_raise_library_error( + lambda: lib.qdevice_status_cluster_text(self.mock_runner), + ( +@@ -206,13 +206,63 @@ class QdeviceStatusClusterTest(TestCase): + report_codes.QDEVICE_GET_STATUS_ERROR, + { + "model": "net", +- "reason": "status error", ++ "reason": "status error\nsome info", + } + ) + ) + self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"]) + + ++class QdeviceConnectedClustersTest(TestCase): ++ def test_empty_status(self): ++ status = "" ++ self.assertEqual( ++ [], ++ lib.qdevice_connected_clusters(status) ++ ) ++ ++ def test_one_cluster(self): ++ status = 
"""\ ++Cluster "rhel72": ++ Algorithm: LMS ++ Tie-breaker: Node with lowest node ID ++ Node ID 2: ++ Client address: ::ffff:192.168.122.122:59738 ++ Configured node list: 1, 2 ++ Membership node list: 1, 2 ++ Vote: ACK (ACK) ++ Node ID 1: ++ Client address: ::ffff:192.168.122.121:43420 ++ Configured node list: 1, 2 ++ Membership node list: 1, 2 ++ Vote: ACK (ACK) ++""" ++ self.assertEqual( ++ ["rhel72"], ++ lib.qdevice_connected_clusters(status) ++ ) ++ ++ def test_more_clusters(self): ++ status = """\ ++Cluster "rhel72": ++Cluster "rhel73": ++""" ++ self.assertEqual( ++ ["rhel72", "rhel73"], ++ lib.qdevice_connected_clusters(status) ++ ) ++ ++ def test_invalid_status(self): ++ status = """\ ++Cluster: ++ Cluster "rhel72": ++""" ++ self.assertEqual( ++ [], ++ lib.qdevice_connected_clusters(status) ++ ) ++ ++ + @mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate") + @mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile") + class QdeviceSignCertificateRequestTest(CertificateTestCase): +@@ -222,7 +272,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase): + ) + def test_success(self, mock_tmp_store, mock_get_cert): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output", 0) ++ self.mock_runner.run.return_value = ("tool output", "", 0) + mock_get_cert.return_value = "new certificate".encode("utf-8") + + result = lib.qdevice_sign_certificate_request( +@@ -293,7 +343,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase): + ) + def test_sign_error(self, mock_tmp_store, mock_get_cert): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output error", 1) ++ self.mock_runner.run.return_value = ("stdout", "tool output error", 1) + + assert_raise_library_error( + lambda: lib.qdevice_sign_certificate_request( +@@ -305,7 +355,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase): + severity.ERROR, + 
report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR, + { +- "reason": "tool output error", ++ "reason": "tool output error\nstdout", + } + ) + ) +@@ -326,7 +376,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase): + ) + def test_output_read_error(self, mock_tmp_store, mock_get_cert): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output", 0) ++ self.mock_runner.run.return_value = ("tool output", "", 0) + mock_get_cert.side_effect = LibraryError + + self.assertRaises( +@@ -399,7 +449,7 @@ class ClientSetupTest(TestCase): + + @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy") + def test_success(self, mock_destroy): +- self.mock_runner.run.return_value = ("tool output", 0) ++ self.mock_runner.run.return_value = ("tool output", "", 0) + + lib.client_setup(self.mock_runner, "certificate data".encode("utf-8")) + +@@ -414,7 +464,7 @@ class ClientSetupTest(TestCase): + + @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy") + def test_init_error(self, mock_destroy): +- self.mock_runner.run.return_value = ("tool output error", 1) ++ self.mock_runner.run.return_value = ("stdout", "tool output error", 1) + + assert_raise_library_error( + lambda: lib.client_setup( +@@ -426,7 +476,7 @@ class ClientSetupTest(TestCase): + report_codes.QDEVICE_INITIALIZATION_ERROR, + { + "model": "net", +- "reason": "tool output error", ++ "reason": "tool output error\nstdout", + } + ) + ) +@@ -448,7 +498,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase): + lambda: True + ) + def test_success(self, mock_get_cert): +- self.mock_runner.run.return_value = ("tool output", 0) ++ self.mock_runner.run.return_value = ("tool output", "", 0) + mock_get_cert.return_value = "new certificate".encode("utf-8") + + result = lib.client_generate_certificate_request( +@@ -492,7 +542,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase): + lambda: True + ) + def test_tool_error(self, mock_get_cert): +- 
self.mock_runner.run.return_value = ("tool output error", 1) ++ self.mock_runner.run.return_value = ("stdout", "tool output error", 1) + + assert_raise_library_error( + lambda: lib.client_generate_certificate_request( +@@ -504,7 +554,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase): + report_codes.QDEVICE_INITIALIZATION_ERROR, + { + "model": "net", +- "reason": "tool output error", ++ "reason": "tool output error\nstdout", + } + ) + ) +@@ -523,7 +573,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase): + ) + def test_success(self, mock_tmp_store, mock_get_cert): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output", 0) ++ self.mock_runner.run.return_value = ("tool output", "", 0) + mock_get_cert.return_value = "new certificate".encode("utf-8") + + result = lib.client_cert_request_to_pk12( +@@ -594,7 +644,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase): + ) + def test_transform_error(self, mock_tmp_store, mock_get_cert): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output error", 1) ++ self.mock_runner.run.return_value = ("stdout", "tool output error", 1) + + assert_raise_library_error( + lambda: lib.client_cert_request_to_pk12( +@@ -605,7 +655,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase): + severity.ERROR, + report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, + { +- "reason": "tool output error", ++ "reason": "tool output error\nstdout", + } + ) + ) +@@ -625,7 +675,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase): + ) + def test_output_read_error(self, mock_tmp_store, mock_get_cert): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output", 0) ++ self.mock_runner.run.return_value = ("tool output", "", 0) + mock_get_cert.side_effect = LibraryError + + self.assertRaises( +@@ -657,7 +707,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase): + ) + def 
test_success(self, mock_tmp_store): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output", 0) ++ self.mock_runner.run.return_value = ("tool output", "", 0) + + lib.client_import_certificate_and_key( + self.mock_runner, +@@ -721,7 +771,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase): + ) + def test_import_error(self, mock_tmp_store): + mock_tmp_store.return_value = self.mock_tmpfile +- self.mock_runner.run.return_value = ("tool output error", 1) ++ self.mock_runner.run.return_value = ("stdout", "tool output error", 1) + + assert_raise_library_error( + lambda: lib.client_import_certificate_and_key( +@@ -732,7 +782,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase): + severity.ERROR, + report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, + { +- "reason": "tool output error", ++ "reason": "tool output error\nstdout", + } + ) + ) +diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py +index aafbe85..d37747a 100644 +--- a/pcs/test/test_lib_external.py ++++ b/pcs/test/test_lib_external.py +@@ -57,19 +57,23 @@ class CommandRunnerTest(TestCase): + self.assertEqual(filtered_kwargs, kwargs) + + def test_basic(self, mock_popen): +- expected_output = "expected output" ++ expected_stdout = "expected stdout" ++ expected_stderr = "expected stderr" + expected_retval = 123 + command = ["a_command"] + command_str = "a_command" + mock_process = mock.MagicMock(spec_set=["communicate", "returncode"]) +- mock_process.communicate.return_value = (expected_output, "dummy") ++ mock_process.communicate.return_value = ( ++ expected_stdout, expected_stderr ++ ) + mock_process.returncode = expected_retval + mock_popen.return_value = mock_process + + runner = lib.CommandRunner(self.mock_logger, self.mock_reporter) +- real_output, real_retval = runner.run(command) ++ real_stdout, real_stderr, real_retval = runner.run(command) + +- self.assertEqual(real_output, expected_output) ++ 
self.assertEqual(real_stdout, expected_stdout) ++ self.assertEqual(real_stderr, expected_stderr) + self.assertEqual(real_retval, expected_retval) + mock_process.communicate.assert_called_once_with(None) + self.assert_popen_called_with( +@@ -82,9 +86,14 @@ class CommandRunnerTest(TestCase): + mock.call("""\ + Finished running: {0} + Return value: {1} +---Debug Output Start-- ++--Debug Stdout Start-- + {2} +---Debug Output End--""".format(command_str, expected_retval, expected_output)) ++--Debug Stdout End-- ++--Debug Stderr Start-- ++{3} ++--Debug Stderr End--""".format( ++ command_str, expected_retval, expected_stdout, expected_stderr ++ )) + ] + self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls)) + self.mock_logger.debug.assert_has_calls(logger_calls) +@@ -105,19 +114,23 @@ Return value: {1} + { + "command": command_str, + "return_value": expected_retval, +- "stdout": expected_output, ++ "stdout": expected_stdout, ++ "stderr": expected_stderr, + } + ) + ] + ) + + def test_env(self, mock_popen): +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + expected_retval = 123 + command = ["a_command"] + command_str = "a_command" + mock_process = mock.MagicMock(spec_set=["communicate", "returncode"]) +- mock_process.communicate.return_value = (expected_output, "dummy") ++ mock_process.communicate.return_value = ( ++ expected_stdout, expected_stderr ++ ) + mock_process.returncode = expected_retval + mock_popen.return_value = mock_process + +@@ -126,12 +139,13 @@ Return value: {1} + self.mock_reporter, + {"a": "a", "b": "b"} + ) +- real_output, real_retval = runner.run( ++ real_stdout, real_stderr, real_retval = runner.run( + command, + env_extend={"b": "B", "c": "C"} + ) + +- self.assertEqual(real_output, expected_output) ++ self.assertEqual(real_stdout, expected_stdout) ++ self.assertEqual(real_stderr, expected_stderr) + self.assertEqual(real_retval, expected_retval) + 
mock_process.communicate.assert_called_once_with(None) + self.assert_popen_called_with( +@@ -144,9 +158,14 @@ Return value: {1} + mock.call("""\ + Finished running: {0} + Return value: {1} +---Debug Output Start-- ++--Debug Stdout Start-- + {2} +---Debug Output End--""".format(command_str, expected_retval, expected_output)) ++--Debug Stdout End-- ++--Debug Stderr Start-- ++{3} ++--Debug Stderr End--""".format( ++ command_str, expected_retval, expected_stdout, expected_stderr ++ )) + ] + self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls)) + self.mock_logger.debug.assert_has_calls(logger_calls) +@@ -167,27 +186,34 @@ Return value: {1} + { + "command": command_str, + "return_value": expected_retval, +- "stdout": expected_output, ++ "stdout": expected_stdout, ++ "stderr": expected_stderr, + } + ) + ] + ) + + def test_stdin(self, mock_popen): +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + expected_retval = 123 + command = ["a_command"] + command_str = "a_command" + stdin = "stdin string" + mock_process = mock.MagicMock(spec_set=["communicate", "returncode"]) +- mock_process.communicate.return_value = (expected_output, "dummy") ++ mock_process.communicate.return_value = ( ++ expected_stdout, expected_stderr ++ ) + mock_process.returncode = expected_retval + mock_popen.return_value = mock_process + + runner = lib.CommandRunner(self.mock_logger, self.mock_reporter) +- real_output, real_retval = runner.run(command, stdin_string=stdin) ++ real_stdout, real_stderr, real_retval = runner.run( ++ command, stdin_string=stdin ++ ) + +- self.assertEqual(real_output, expected_output) ++ self.assertEqual(real_stdout, expected_stdout) ++ self.assertEqual(real_stderr, expected_stderr) + self.assertEqual(real_retval, expected_retval) + mock_process.communicate.assert_called_once_with(stdin) + self.assert_popen_called_with( +@@ -204,9 +230,14 @@ Running: {0} + mock.call("""\ + Finished running: {0} 
+ Return value: {1} +---Debug Output Start-- ++--Debug Stdout Start-- + {2} +---Debug Output End--""".format(command_str, expected_retval, expected_output)) ++--Debug Stdout End-- ++--Debug Stderr Start-- ++{3} ++--Debug Stderr End--""".format( ++ command_str, expected_retval, expected_stdout, expected_stderr ++ )) + ] + self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls)) + self.mock_logger.debug.assert_has_calls(logger_calls) +@@ -227,7 +258,8 @@ Return value: {1} + { + "command": command_str, + "return_value": expected_retval, +- "stdout": expected_output, ++ "stdout": expected_stdout, ++ "stderr": expected_stderr, + } + ) + ] +@@ -957,7 +989,7 @@ class ParallelCommunicationHelperTest(TestCase): + class IsCmanClusterTest(TestCase): + def template_test(self, is_cman, corosync_output, corosync_retval=0): + mock_runner = mock.MagicMock(spec_set=lib.CommandRunner) +- mock_runner.run.return_value = (corosync_output, corosync_retval) ++ mock_runner.run.return_value = (corosync_output, "", corosync_retval) + self.assertEqual(is_cman, lib.is_cman_cluster(mock_runner)) + mock_runner.run.assert_called_once_with([ + os.path.join(settings.corosync_binaries, "corosync"), +@@ -1021,7 +1053,7 @@ class DisableServiceTest(TestCase): + def test_systemctl(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "Removed symlink", 0) + lib.disable_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "disable", self.service + ".service"] +@@ -1030,7 +1062,7 @@ class DisableServiceTest(TestCase): + def test_systemctl_failed(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "Failed", 1) + self.assertRaises( + 
lib.DisableServiceError, + lambda: lib.disable_service(self.mock_runner, self.service) +@@ -1042,7 +1074,7 @@ class DisableServiceTest(TestCase): + def test_not_systemctl(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.disable_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["chkconfig", self.service, "off"] +@@ -1051,7 +1083,7 @@ class DisableServiceTest(TestCase): + def test_not_systemctl_failed(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "error", 1) + self.assertRaises( + lib.DisableServiceError, + lambda: lib.disable_service(self.mock_runner, self.service) +@@ -1079,7 +1111,7 @@ class DisableServiceTest(TestCase): + def test_instance_systemctl(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "Removed symlink", 0) + lib.disable_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with([ + "systemctl", +@@ -1090,7 +1122,7 @@ class DisableServiceTest(TestCase): + def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.disable_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with( + ["chkconfig", self.service, "off"] +@@ -1104,7 +1136,7 @@ class EnableServiceTest(TestCase): + + def test_systemctl(self, mock_systemctl): + 
mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "Created symlink", 0) + lib.enable_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "enable", self.service + ".service"] +@@ -1112,7 +1144,7 @@ class EnableServiceTest(TestCase): + + def test_systemctl_failed(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "Failed", 1) + self.assertRaises( + lib.EnableServiceError, + lambda: lib.enable_service(self.mock_runner, self.service) +@@ -1123,7 +1155,7 @@ class EnableServiceTest(TestCase): + + def test_not_systemctl(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.enable_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["chkconfig", self.service, "on"] +@@ -1131,7 +1163,7 @@ class EnableServiceTest(TestCase): + + def test_not_systemctl_failed(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "error", 1) + self.assertRaises( + lib.EnableServiceError, + lambda: lib.enable_service(self.mock_runner, self.service) +@@ -1142,7 +1174,7 @@ class EnableServiceTest(TestCase): + + def test_instance_systemctl(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "Created symlink", 0) + lib.enable_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with([ + "systemctl", +@@ -1152,7 +1184,7 @@ class EnableServiceTest(TestCase): + + def test_instance_not_systemctl(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = 
("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.enable_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with( + ["chkconfig", self.service, "on"] +@@ -1167,7 +1199,7 @@ class StartServiceTest(TestCase): + + def test_systemctl(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.start_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "start", self.service + ".service"] +@@ -1175,7 +1207,7 @@ class StartServiceTest(TestCase): + + def test_systemctl_failed(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "Failed", 1) + self.assertRaises( + lib.StartServiceError, + lambda: lib.start_service(self.mock_runner, self.service) +@@ -1186,7 +1218,7 @@ class StartServiceTest(TestCase): + + def test_not_systemctl(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("Starting...", "", 0) + lib.start_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["service", self.service, "start"] +@@ -1194,7 +1226,7 @@ class StartServiceTest(TestCase): + + def test_not_systemctl_failed(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "unrecognized", 1) + self.assertRaises( + lib.StartServiceError, + lambda: lib.start_service(self.mock_runner, self.service) +@@ -1205,7 +1237,7 @@ class StartServiceTest(TestCase): + + def test_instance_systemctl(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + 
lib.start_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with([ + "systemctl", "start", "{0}@{1}.service".format(self.service, "test") +@@ -1213,7 +1245,7 @@ class StartServiceTest(TestCase): + + def test_instance_not_systemctl(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("Starting...", "", 0) + lib.start_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with( + ["service", self.service, "start"] +@@ -1228,7 +1260,7 @@ class StopServiceTest(TestCase): + + def test_systemctl(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.stop_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "stop", self.service + ".service"] +@@ -1236,7 +1268,7 @@ class StopServiceTest(TestCase): + + def test_systemctl_failed(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "Failed", 1) + self.assertRaises( + lib.StopServiceError, + lambda: lib.stop_service(self.mock_runner, self.service) +@@ -1247,7 +1279,7 @@ class StopServiceTest(TestCase): + + def test_not_systemctl(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("Stopping...", "", 0) + lib.stop_service(self.mock_runner, self.service) + self.mock_runner.run.assert_called_once_with( + ["service", self.service, "stop"] +@@ -1255,7 +1287,7 @@ class StopServiceTest(TestCase): + + def test_not_systemctl_failed(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "unrecognized", 1) + 
self.assertRaises( + lib.StopServiceError, + lambda: lib.stop_service(self.mock_runner, self.service) +@@ -1266,7 +1298,7 @@ class StopServiceTest(TestCase): + + def test_instance_systemctl(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.stop_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with([ + "systemctl", "stop", "{0}@{1}.service".format(self.service, "test") +@@ -1274,7 +1306,7 @@ class StopServiceTest(TestCase): + + def test_instance_not_systemctl(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("Stopping...", "", 0) + lib.stop_service(self.mock_runner, self.service, instance="test") + self.mock_runner.run.assert_called_once_with( + ["service", self.service, "stop"] +@@ -1287,14 +1319,14 @@ class KillServicesTest(TestCase): + self.services = ["service1", "service2"] + + def test_success(self): +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + lib.kill_services(self.mock_runner, self.services) + self.mock_runner.run.assert_called_once_with( + ["killall", "--quiet", "--signal", "9", "--"] + self.services + ) + + def test_failed(self): +- self.mock_runner.run.return_value = ("error", 1) ++ self.mock_runner.run.return_value = ("", "error", 1) + self.assertRaises( + lib.KillServicesError, + lambda: lib.kill_services(self.mock_runner, self.services) +@@ -1304,7 +1336,7 @@ class KillServicesTest(TestCase): + ) + + def test_service_not_running(self): +- self.mock_runner.run.return_value = ("", 1) ++ self.mock_runner.run.return_value = ("", "", 1) + lib.kill_services(self.mock_runner, self.services) + self.mock_runner.run.assert_called_once_with( + ["killall", "--quiet", "--signal", "9", "--"] + self.services +@@ -1348,7 +1380,7 @@ class 
IsServiceEnabledTest(TestCase): + + def test_systemctl_enabled(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("enabled\n", 0) ++ self.mock_runner.run.return_value = ("enabled\n", "", 0) + self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service)) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "is-enabled", self.service + ".service"] +@@ -1356,7 +1388,7 @@ class IsServiceEnabledTest(TestCase): + + def test_systemctl_disabled(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("disabled\n", 2) ++ self.mock_runner.run.return_value = ("disabled\n", "", 2) + self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service)) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "is-enabled", self.service + ".service"] +@@ -1364,7 +1396,7 @@ class IsServiceEnabledTest(TestCase): + + def test_not_systemctl_enabled(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("", "", 0) + self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service)) + self.mock_runner.run.assert_called_once_with( + ["chkconfig", self.service] +@@ -1372,7 +1404,7 @@ class IsServiceEnabledTest(TestCase): + + def test_not_systemctl_disabled(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 3) ++ self.mock_runner.run.return_value = ("", "", 3) + self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service)) + self.mock_runner.run.assert_called_once_with( + ["chkconfig", self.service] +@@ -1387,7 +1419,7 @@ class IsServiceRunningTest(TestCase): + + def test_systemctl_running(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("active", "", 0) + self.assertTrue(lib.is_service_running(self.mock_runner, 
self.service)) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "is-active", self.service + ".service"] +@@ -1395,7 +1427,7 @@ class IsServiceRunningTest(TestCase): + + def test_systemctl_not_running(self, mock_systemctl): + mock_systemctl.return_value = True +- self.mock_runner.run.return_value = ("", 2) ++ self.mock_runner.run.return_value = ("inactive", "", 2) + self.assertFalse(lib.is_service_running(self.mock_runner, self.service)) + self.mock_runner.run.assert_called_once_with( + ["systemctl", "is-active", self.service + ".service"] +@@ -1403,7 +1435,7 @@ class IsServiceRunningTest(TestCase): + + def test_not_systemctl_running(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) ++ self.mock_runner.run.return_value = ("is running", "", 0) + self.assertTrue(lib.is_service_running(self.mock_runner, self.service)) + self.mock_runner.run.assert_called_once_with( + ["service", self.service, "status"] +@@ -1411,7 +1443,7 @@ class IsServiceRunningTest(TestCase): + + def test_not_systemctl_not_running(self, mock_systemctl): + mock_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 3) ++ self.mock_runner.run.return_value = ("is stopped", "", 3) + self.assertFalse(lib.is_service_running(self.mock_runner, self.service)) + self.mock_runner.run.assert_called_once_with( + ["service", self.service, "status"] +@@ -1484,7 +1516,7 @@ sbd.service enabled + pacemaker.service enabled + + 3 unit files listed. 
+-""", 0) ++""", "", 0) + self.assertEqual( + lib.get_systemd_services(self.mock_runner), + ["pcsd", "sbd", "pacemaker"] +@@ -1496,7 +1528,7 @@ pacemaker.service enabled + + def test_failed(self, mock_is_systemctl): + mock_is_systemctl.return_value = True +- self.mock_runner.run.return_value = ("failed", 1) ++ self.mock_runner.run.return_value = ("stdout", "failed", 1) + self.assertEqual(lib.get_systemd_services(self.mock_runner), []) + self.assertEqual(mock_is_systemctl.call_count, 1) + self.mock_runner.run.assert_called_once_with( +@@ -1505,10 +1537,9 @@ pacemaker.service enabled + + def test_not_systemd(self, mock_is_systemctl): + mock_is_systemctl.return_value = False +- self.mock_runner.run.return_value = ("", 0) + self.assertEqual(lib.get_systemd_services(self.mock_runner), []) +- self.assertEqual(mock_is_systemctl.call_count, 1) +- self.assertEqual(self.mock_runner.call_count, 0) ++ mock_is_systemctl.assert_called_once_with() ++ self.mock_runner.assert_not_called() + + + @mock.patch("pcs.lib.external.is_systemctl") +@@ -1522,24 +1553,20 @@ class GetNonSystemdServicesTest(TestCase): + pcsd 0:off 1:off 2:on 3:on 4:on 5:on 6:off + sbd 0:off 1:on 2:on 3:on 4:on 5:on 6:off + pacemaker 0:off 1:off 2:off 3:off 4:off 5:off 6:off +-""", 0) ++""", "", 0) + self.assertEqual( + lib.get_non_systemd_services(self.mock_runner), + ["pcsd", "sbd", "pacemaker"] + ) + self.assertEqual(mock_is_systemctl.call_count, 1) +- self.mock_runner.run.assert_called_once_with( +- ["chkconfig"], ignore_stderr=True +- ) ++ self.mock_runner.run.assert_called_once_with(["chkconfig"]) + + def test_failed(self, mock_is_systemctl): + mock_is_systemctl.return_value = False +- self.mock_runner.run.return_value = ("failed", 1) ++ self.mock_runner.run.return_value = ("stdout", "failed", 1) + self.assertEqual(lib.get_non_systemd_services(self.mock_runner), []) + self.assertEqual(mock_is_systemctl.call_count, 1) +- self.mock_runner.run.assert_called_once_with( +- ["chkconfig"], ignore_stderr=True +- ) 
++ self.mock_runner.run.assert_called_once_with(["chkconfig"]) + + def test_systemd(self, mock_is_systemctl): + mock_is_systemctl.return_value = True +diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py +index c475db6..7ca7b77 100644 +--- a/pcs/test/test_lib_pacemaker.py ++++ b/pcs/test/test_lib_pacemaker.py +@@ -64,21 +64,31 @@ class LibraryPacemakerNodeStatusTest(LibraryPacemakerTest): + + class GetClusterStatusXmlTest(LibraryPacemakerTest): + def test_success(self): +- expected_xml = "<xml />" ++ expected_stdout = "<xml />" ++ expected_stderr = "" + expected_retval = 0 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_xml, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + real_xml = lib.get_cluster_status_xml(mock_runner) + + mock_runner.run.assert_called_once_with(self.crm_mon_cmd()) +- self.assertEqual(expected_xml, real_xml) ++ self.assertEqual(expected_stdout, real_xml) + + def test_error(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.get_cluster_status_xml(mock_runner), +@@ -86,8 +96,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest): + Severity.ERROR, + report_codes.CRM_MON_ERROR, + { +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr + "\n" + expected_stdout, + } + ) + ) +@@ -96,23 +105,33 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest): + + class GetCibXmlTest(LibraryPacemakerTest): + def test_success(self): +- expected_xml = "<xml />" ++ expected_stdout = "<xml />" ++ expected_stderr = "" + expected_retval = 0 + 
mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_xml, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + real_xml = lib.get_cib_xml(mock_runner) + + mock_runner.run.assert_called_once_with( + [self.path("cibadmin"), "--local", "--query"] + ) +- self.assertEqual(expected_xml, real_xml) ++ self.assertEqual(expected_stdout, real_xml) + + def test_error(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.get_cib_xml(mock_runner), +@@ -120,8 +139,7 @@ class GetCibXmlTest(LibraryPacemakerTest): + Severity.ERROR, + report_codes.CIB_LOAD_ERROR, + { +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr + "\n" + expected_stdout, + } + ) + ) +@@ -131,11 +149,16 @@ class GetCibXmlTest(LibraryPacemakerTest): + ) + + def test_success_scope(self): +- expected_xml = "<xml />" ++ expected_stdout = "<xml />" ++ expected_stderr = "" + expected_retval = 0 + scope = "test_scope" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_xml, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + real_xml = lib.get_cib_xml(mock_runner, scope) + +@@ -145,14 +168,19 @@ class GetCibXmlTest(LibraryPacemakerTest): + "--local", "--query", "--scope={0}".format(scope) + ] + ) +- self.assertEqual(expected_xml, real_xml) ++ self.assertEqual(expected_stdout, real_xml) + + def test_scope_error(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + 
expected_retval = 6 + scope = "test_scope" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.get_cib_xml(mock_runner, scope=scope), +@@ -161,8 +189,7 @@ class GetCibXmlTest(LibraryPacemakerTest): + report_codes.CIB_LOAD_ERROR_SCOPE_MISSING, + { + "scope": scope, +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr + "\n" + expected_stdout, + } + ) + ) +@@ -194,10 +221,15 @@ class GetCibTest(LibraryPacemakerTest): + class ReplaceCibConfigurationTest(LibraryPacemakerTest): + def test_success(self): + xml = "<xml/>" +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "" + expected_retval = 0 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + lib.replace_cib_configuration( + mock_runner, +@@ -214,10 +246,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest): + + def test_cib_upgraded(self): + xml = "<xml/>" +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "" + expected_retval = 0 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + lib.replace_cib_configuration( + mock_runner, XmlManipulation.from_str(xml).tree, True +@@ -230,10 +267,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest): + + def test_error(self): + xml = "<xml/>" +- expected_error = "expected error" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + 
expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.replace_cib_configuration( +@@ -245,8 +287,8 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest): + Severity.ERROR, + report_codes.CIB_PUSH_ERROR, + { +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr, ++ "pushed_cib": expected_stdout, + } + ) + ) +@@ -261,10 +303,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest): + + class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest): + def test_offline(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + self.assertEqual( + {"offline": True}, +@@ -273,10 +320,15 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest): + mock_runner.run.assert_called_once_with(self.crm_mon_cmd()) + + def test_invalid_status(self): +- expected_xml = "some error" ++ expected_stdout = "invalid xml" ++ expected_stderr = "" + expected_retval = 0 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_xml, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.get_local_node_status(mock_runner), +@@ -310,9 +362,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest): + ), + ] + return_value_list = [ +- (str(self.status), 0), +- (node_id, 0), +- (node_name, 0) ++ (str(self.status), "", 0), ++ (node_id, "", 0), ++ (node_name, "", 0) + ] 
+ mock_runner.run.side_effect = return_value_list + +@@ -339,9 +391,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest): + ), + ] + return_value_list = [ +- (str(self.status), 0), +- (node_id, 0), +- (node_name_bad, 0) ++ (str(self.status), "", 0), ++ (node_id, "", 0), ++ (node_name_bad, "", 0) + ] + mock_runner.run.side_effect = return_value_list + +@@ -370,8 +422,8 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest): + mock.call([self.path("crm_node"), "--cluster-id"]), + ] + return_value_list = [ +- (str(self.status), 0), +- ("some error", 1), ++ (str(self.status), "", 0), ++ ("", "some error", 1), + ] + mock_runner.run.side_effect = return_value_list + +@@ -403,9 +455,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest): + ), + ] + return_value_list = [ +- (str(self.status), 0), +- (node_id, 0), +- ("some error", 1), ++ (str(self.status), "", 0), ++ (node_id, "", 0), ++ ("", "some error", 1), + ] + mock_runner.run.side_effect = return_value_list + +@@ -437,9 +489,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest): + ), + ] + return_value_list = [ +- (str(self.status), 0), +- (node_id, 0), +- ("(null)", 0), ++ (str(self.status), "", 0), ++ (node_id, "", 0), ++ ("(null)", "", 0), + ] + mock_runner.run.side_effect = return_value_list + +@@ -465,15 +517,16 @@ class ResourceCleanupTest(LibraryPacemakerTest): + return str(XmlManipulation(doc)) + + def test_basic(self): +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + mock_runner = mock.MagicMock(spec_set=CommandRunner) + call_list = [ + mock.call(self.crm_mon_cmd()), + mock.call([self.path("crm_resource"), "--cleanup"]), + ] + return_value_list = [ +- (self.fixture_status_xml(1, 1), 0), +- (expected_output, 0), ++ (self.fixture_status_xml(1, 1), "", 0), ++ (expected_stdout, expected_stderr, 0), + ] + mock_runner.run.side_effect = return_value_list + +@@ -482,11 +535,18 @@ class 
ResourceCleanupTest(LibraryPacemakerTest): + self.assertEqual(len(return_value_list), len(call_list)) + self.assertEqual(len(return_value_list), mock_runner.run.call_count) + mock_runner.run.assert_has_calls(call_list) +- self.assertEqual(expected_output, real_output) ++ self.assertEqual( ++ expected_stdout + "\n" + expected_stderr, ++ real_output ++ ) + + def test_threshold_exceeded(self): + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (self.fixture_status_xml(1000, 1000), 0) ++ mock_runner.run.return_value = ( ++ self.fixture_status_xml(1000, 1000), ++ "", ++ 0 ++ ) + + assert_raise_library_error( + lambda: lib.resource_cleanup(mock_runner), +@@ -501,49 +561,62 @@ class ResourceCleanupTest(LibraryPacemakerTest): + mock_runner.run.assert_called_once_with(self.crm_mon_cmd()) + + def test_forced(self): +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, 0) ++ mock_runner.run.return_value = (expected_stdout, expected_stderr, 0) + + real_output = lib.resource_cleanup(mock_runner, force=True) + + mock_runner.run.assert_called_once_with( + [self.path("crm_resource"), "--cleanup"] + ) +- self.assertEqual(expected_output, real_output) ++ self.assertEqual( ++ expected_stdout + "\n" + expected_stderr, ++ real_output ++ ) + + def test_resource(self): + resource = "test_resource" +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, 0) ++ mock_runner.run.return_value = (expected_stdout, expected_stderr, 0) + + real_output = lib.resource_cleanup(mock_runner, resource=resource) + + mock_runner.run.assert_called_once_with( + [self.path("crm_resource"), "--cleanup", "--resource", resource] + ) +- 
self.assertEqual(expected_output, real_output) ++ self.assertEqual( ++ expected_stdout + "\n" + expected_stderr, ++ real_output ++ ) + + def test_node(self): + node = "test_node" +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, 0) ++ mock_runner.run.return_value = (expected_stdout, expected_stderr, 0) + + real_output = lib.resource_cleanup(mock_runner, node=node) + + mock_runner.run.assert_called_once_with( + [self.path("crm_resource"), "--cleanup", "--node", node] + ) +- self.assertEqual(expected_output, real_output) ++ self.assertEqual( ++ expected_stdout + "\n" + expected_stderr, ++ real_output ++ ) + + def test_node_and_resource(self): + node = "test_node" + resource = "test_resource" +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, 0) ++ mock_runner.run.return_value = (expected_stdout, expected_stderr, 0) + + real_output = lib.resource_cleanup( + mock_runner, resource=resource, node=node +@@ -555,13 +628,21 @@ class ResourceCleanupTest(LibraryPacemakerTest): + "--cleanup", "--resource", resource, "--node", node + ] + ) +- self.assertEqual(expected_output, real_output) ++ self.assertEqual( ++ expected_stdout + "\n" + expected_stderr, ++ real_output ++ ) + + def test_error_state(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.resource_cleanup(mock_runner), +@@ -569,8 +650,7 
@@ class ResourceCleanupTest(LibraryPacemakerTest): + Severity.ERROR, + report_codes.CRM_MON_ERROR, + { +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr + "\n" + expected_stdout, + } + ) + ) +@@ -578,7 +658,8 @@ class ResourceCleanupTest(LibraryPacemakerTest): + mock_runner.run.assert_called_once_with(self.crm_mon_cmd()) + + def test_error_cleanup(self): +- expected_error = "expected error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) + call_list = [ +@@ -586,8 +667,8 @@ class ResourceCleanupTest(LibraryPacemakerTest): + mock.call([self.path("crm_resource"), "--cleanup"]), + ] + return_value_list = [ +- (self.fixture_status_xml(1, 1), 0), +- (expected_error, expected_retval), ++ (self.fixture_status_xml(1, 1), "", 0), ++ (expected_stdout, expected_stderr, expected_retval), + ] + mock_runner.run.side_effect = return_value_list + +@@ -597,8 +678,7 @@ class ResourceCleanupTest(LibraryPacemakerTest): + Severity.ERROR, + report_codes.RESOURCE_CLEANUP_ERROR, + { +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr + "\n" + expected_stdout, + } + ) + ) +@@ -609,10 +689,33 @@ class ResourceCleanupTest(LibraryPacemakerTest): + + class ResourcesWaitingTest(LibraryPacemakerTest): + def test_has_support(self): +- expected_output = "something --wait something else" ++ expected_stdout = "" ++ expected_stderr = "something --wait something else" ++ expected_retval = 1 ++ mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) ++ ++ self.assertTrue( ++ lib.has_resource_wait_support(mock_runner) ++ ) ++ mock_runner.run.assert_called_once_with( ++ [self.path("crm_resource"), "-?"] ++ ) ++ ++ def test_has_support_stdout(self): ++ expected_stdout = "something --wait something else" ++ expected_stderr = "" + 
expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + self.assertTrue( + lib.has_resource_wait_support(mock_runner) +@@ -622,10 +725,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest): + ) + + def test_doesnt_have_support(self): +- expected_output = "something something else" ++ expected_stdout = "something something else" ++ expected_stderr = "something something else" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + self.assertFalse( + lib.has_resource_wait_support(mock_runner) +@@ -652,10 +760,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest): + ) + + def test_wait_success(self): +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + expected_retval = 0 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + self.assertEqual(None, lib.wait_for_resources(mock_runner)) + +@@ -664,11 +777,16 @@ class ResourcesWaitingTest(LibraryPacemakerTest): + ) + + def test_wait_timeout_success(self): +- expected_output = "expected output" ++ expected_stdout = "expected output" ++ expected_stderr = "expected stderr" + expected_retval = 0 + timeout = 10 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_output, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + self.assertEqual(None, lib.wait_for_resources(mock_runner, timeout)) + +@@ 
-680,10 +798,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest): + ) + + def test_wait_error(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.wait_for_resources(mock_runner), +@@ -691,8 +814,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest): + Severity.ERROR, + report_codes.RESOURCE_WAIT_ERROR, + { +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr + "\n" + expected_stdout, + } + ) + ) +@@ -702,10 +824,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest): + ) + + def test_wait_error_timeout(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 62 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.wait_for_resources(mock_runner), +@@ -713,8 +840,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest): + Severity.ERROR, + report_codes.RESOURCE_WAIT_TIMED_OUT, + { +- "return_value": expected_retval, +- "stdout": expected_error, ++ "reason": expected_stderr + "\n" + expected_stdout, + } + ) + ) +@@ -727,7 +853,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + def test_standby_local(self): + expected_retval = 0 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("dummy", expected_retval) ++ mock_runner.run.return_value = ("dummy", "", expected_retval) + + output = lib.nodes_standby(mock_runner) + +@@ -739,7 +865,7 @@ class 
NodeStandbyTest(LibraryPacemakerNodeStatusTest): + def test_unstandby_local(self): + expected_retval = 0 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("dummy", expected_retval) ++ mock_runner.run.return_value = ("dummy", "", expected_retval) + + output = lib.nodes_unstandby(mock_runner) + +@@ -760,8 +886,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + mock.call([self.path("crm_standby"), "-v", "on", "-N", n]) + for n in nodes + ] +- return_value_list = [(str(self.status), 0)] +- return_value_list += [("dummy", 0) for n in nodes] ++ return_value_list = [(str(self.status), "", 0)] ++ return_value_list += [("dummy", "", 0) for n in nodes] + mock_runner.run.side_effect = return_value_list + + output = lib.nodes_standby(mock_runner, all_nodes=True) +@@ -783,8 +909,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + mock.call([self.path("crm_standby"), "-D", "-N", n]) + for n in nodes + ] +- return_value_list = [(str(self.status), 0)] +- return_value_list += [("dummy", 0) for n in nodes] ++ return_value_list = [(str(self.status), "", 0)] ++ return_value_list += [("dummy", "", 0) for n in nodes] + mock_runner.run.side_effect = return_value_list + + output = lib.nodes_unstandby(mock_runner, all_nodes=True) +@@ -806,8 +932,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + mock.call([self.path("crm_standby"), "-v", "on", "-N", n]) + for n in nodes[1:] + ] +- return_value_list = [(str(self.status), 0)] +- return_value_list += [("dummy", 0) for n in nodes[1:]] ++ return_value_list = [(str(self.status), "", 0)] ++ return_value_list += [("dummy", "", 0) for n in nodes[1:]] + mock_runner.run.side_effect = return_value_list + + output = lib.nodes_standby(mock_runner, node_list=nodes[1:]) +@@ -829,8 +955,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + mock.call([self.path("crm_standby"), "-D", "-N", n]) + for n in nodes[:2] + ] +- return_value_list = [(str(self.status), 0)] +- 
return_value_list += [("dummy", 0) for n in nodes[:2]] ++ return_value_list = [(str(self.status), "", 0)] ++ return_value_list += [("dummy", "", 0) for n in nodes[:2]] + mock_runner.run.side_effect = return_value_list + + output = lib.nodes_unstandby(mock_runner, node_list=nodes[:2]) +@@ -845,7 +971,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + self.fixture_get_node_status("node_1", "id_1") + ) + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (str(self.status), 0) ++ mock_runner.run.return_value = (str(self.status), "", 0) + + assert_raise_library_error( + lambda: lib.nodes_standby(mock_runner, ["node_2"]), +@@ -863,7 +989,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + self.fixture_get_node_status("node_1", "id_1") + ) + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (str(self.status), 0) ++ mock_runner.run.return_value = (str(self.status), "", 0) + + assert_raise_library_error( + lambda: lib.nodes_unstandby(mock_runner, ["node_2", "node_3"]), +@@ -882,17 +1008,24 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + mock_runner.run.assert_called_once_with(self.crm_mon_cmd()) + + def test_error_one_node(self): +- expected_error = "some error" ++ expected_stdout = "some info" ++ expected_stderr = "some error" + expected_retval = 1 + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (expected_error, expected_retval) ++ mock_runner.run.return_value = ( ++ expected_stdout, ++ expected_stderr, ++ expected_retval ++ ) + + assert_raise_library_error( + lambda: lib.nodes_unstandby(mock_runner), + ( + Severity.ERROR, + report_codes.COMMON_ERROR, +- {} ++ { ++ "text": expected_stderr + "\n" + expected_stdout, ++ } + ) + ) + +@@ -913,11 +1046,11 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + for n in nodes + ] + return_value_list = [ +- (str(self.status), 0), +- ("dummy1", 0), +- ("dummy2", 1), +- ("dummy3", 0), 
+- ("dummy4", 1), ++ (str(self.status), "", 0), ++ ("dummy1", "", 0), ++ ("dummy2", "error2", 1), ++ ("dummy3", "", 0), ++ ("dummy4", "error4", 1), + ] + mock_runner.run.side_effect = return_value_list + +@@ -926,12 +1059,16 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest): + ( + Severity.ERROR, + report_codes.COMMON_ERROR, +- {} ++ { ++ "text": "error2\ndummy2", ++ } + ), + ( + Severity.ERROR, + report_codes.COMMON_ERROR, +- {} ++ { ++ "text": "error4\ndummy4", ++ } + ) + ) + +diff --git a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py +index 08f9061..a569e66 100644 +--- a/pcs/test/test_lib_resource_agent.py ++++ b/pcs/test/test_lib_resource_agent.py +@@ -199,7 +199,7 @@ class GetFenceAgentMetadataTest(LibraryResourceTest): + def test_execution_failed(self, mock_is_runnable): + mock_is_runnable.return_value = True + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("error", 1) ++ mock_runner.run.return_value = ("", "error", 1) + agent_name = "fence_ipmi" + + self.assert_raises( +@@ -210,13 +210,13 @@ class GetFenceAgentMetadataTest(LibraryResourceTest): + + script_path = os.path.join(settings.fence_agent_binaries, agent_name) + mock_runner.run.assert_called_once_with( +- [script_path, "-o", "metadata"], ignore_stderr=True ++ [script_path, "-o", "metadata"] + ) + + @mock.patch("pcs.lib.resource_agent.is_path_runnable") + def test_invalid_xml(self, mock_is_runnable): + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("not xml", 0) ++ mock_runner.run.return_value = ("not xml", "", 0) + mock_is_runnable.return_value = True + agent_name = "fence_ipmi" + self.assert_raises( +@@ -227,7 +227,7 @@ class GetFenceAgentMetadataTest(LibraryResourceTest): + + script_path = os.path.join(settings.fence_agent_binaries, agent_name) + mock_runner.run.assert_called_once_with( +- [script_path, "-o", "metadata"], ignore_stderr=True ++ [script_path, "-o", "metadata"] + ) + + 
@mock.patch("pcs.lib.resource_agent.is_path_runnable") +@@ -235,14 +235,14 @@ class GetFenceAgentMetadataTest(LibraryResourceTest): + agent_name = "fence_ipmi" + xml = "<xml />" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (xml, 0) ++ mock_runner.run.return_value = (xml, "", 0) + mock_is_runnable.return_value = True + + out_dom = lib_ra.get_fence_agent_metadata(mock_runner, agent_name) + + script_path = os.path.join(settings.fence_agent_binaries, agent_name) + mock_runner.run.assert_called_once_with( +- [script_path, "-o", "metadata"], ignore_stderr=True ++ [script_path, "-o", "metadata"] + ) + assert_xml_equal(xml, str(XmlMan(out_dom))) + +@@ -304,7 +304,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest): + provider = "provider" + agent = "agent" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("error", 1) ++ mock_runner.run.return_value = ("", "error", 1) + mock_is_runnable.return_value = True + + self.assert_raises( +@@ -318,8 +318,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest): + script_path = os.path.join(settings.ocf_resources, provider, agent) + mock_runner.run.assert_called_once_with( + [script_path, "meta-data"], +- env_extend={"OCF_ROOT": settings.ocf_root}, +- ignore_stderr=True ++ env_extend={"OCF_ROOT": settings.ocf_root} + ) + + @mock.patch("pcs.lib.resource_agent.is_path_runnable") +@@ -327,7 +326,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest): + provider = "provider" + agent = "agent" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("not xml", 0) ++ mock_runner.run.return_value = ("not xml", "", 0) + mock_is_runnable.return_value = True + + self.assert_raises( +@@ -341,8 +340,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest): + script_path = os.path.join(settings.ocf_resources, provider, agent) + mock_runner.run.assert_called_once_with( + [script_path, "meta-data"], 
+- env_extend={"OCF_ROOT": settings.ocf_root}, +- ignore_stderr=True ++ env_extend={"OCF_ROOT": settings.ocf_root} + ) + + @mock.patch("pcs.lib.resource_agent.is_path_runnable") +@@ -351,7 +349,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest): + agent = "agent" + xml = "<xml />" + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (xml, 0) ++ mock_runner.run.return_value = (xml, "", 0) + mock_is_runnable.return_value = True + + out_dom = lib_ra._get_ocf_resource_agent_metadata( +@@ -361,8 +359,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest): + script_path = os.path.join(settings.ocf_resources, provider, agent) + mock_runner.run.assert_called_once_with( + [script_path, "meta-data"], +- env_extend={"OCF_ROOT": settings.ocf_root}, +- ignore_stderr=True ++ env_extend={"OCF_ROOT": settings.ocf_root} + ) + assert_xml_equal(xml, str(XmlMan(out_dom))) + +@@ -596,7 +593,7 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest): + </resource-agent> + """ + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = (xml, 0) ++ mock_runner.run.return_value = (xml, "", 0) + self.assertEqual( + [ + { +@@ -623,12 +620,12 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest): + lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner) + ) + mock_runner.run.assert_called_once_with( +- [settings.stonithd_binary, "metadata"], ignore_stderr=True ++ [settings.stonithd_binary, "metadata"] + ) + + def test_failed_to_get_xml(self): + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("", 1) ++ mock_runner.run.return_value = ("", "some error", 1) + self.assert_raises( + lib_ra.UnableToGetAgentMetadata, + lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner), +@@ -636,19 +633,19 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest): + ) + + mock_runner.run.assert_called_once_with( +- [settings.stonithd_binary, 
"metadata"], ignore_stderr=True ++ [settings.stonithd_binary, "metadata"] + ) + + def test_invalid_xml(self): + mock_runner = mock.MagicMock(spec_set=CommandRunner) +- mock_runner.run.return_value = ("invalid XML", 0) ++ mock_runner.run.return_value = ("invalid XML", "", 0) + self.assertRaises( + lib_ra.InvalidMetadataFormat, + lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner) + ) + + mock_runner.run.assert_called_once_with( +- [settings.stonithd_binary, "metadata"], ignore_stderr=True ++ [settings.stonithd_binary, "metadata"] + ) + + +diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py +index 720d8b1..9b7b801 100644 +--- a/pcs/test/test_lib_sbd.py ++++ b/pcs/test/test_lib_sbd.py +@@ -155,9 +155,8 @@ class AtbHasToBeEnabledTest(TestCase): + self.assertFalse(lib_sbd.atb_has_to_be_enabled( + self.mock_runner, self.mock_conf, 1 + )) +- mock_is_needed.assert_called_once_with( +- self.mock_runner, self.mock_conf, 1 +- ) ++ self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with() ++ mock_is_needed.assert_not_called() + + def test_atb_needed_is_disabled(self, mock_is_needed): + mock_is_needed.return_value = True +@@ -165,6 +164,7 @@ class AtbHasToBeEnabledTest(TestCase): + self.assertTrue(lib_sbd.atb_has_to_be_enabled( + self.mock_runner, self.mock_conf, -1 + )) ++ self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with() + mock_is_needed.assert_called_once_with( + self.mock_runner, self.mock_conf, -1 + ) +@@ -175,9 +175,8 @@ class AtbHasToBeEnabledTest(TestCase): + self.assertFalse(lib_sbd.atb_has_to_be_enabled( + self.mock_runner, self.mock_conf, 2 + )) +- mock_is_needed.assert_called_once_with( +- self.mock_runner, self.mock_conf, 2 +- ) ++ self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with() ++ mock_is_needed.assert_not_called() + + def test_atb_not_needed_is_disabled(self, mock_is_needed): + mock_is_needed.return_value = False +@@ -185,6 +184,7 @@ class AtbHasToBeEnabledTest(TestCase): + 
self.assertFalse(lib_sbd.atb_has_to_be_enabled( + self.mock_runner, self.mock_conf, -2 + )) ++ self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with() + mock_is_needed.assert_called_once_with( + self.mock_runner, self.mock_conf, -2 + ) +-- +1.8.3.1 + diff --git a/SOURCES/bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch b/SOURCES/bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch new file mode 100644 index 0000000..f13113c --- /dev/null +++ b/SOURCES/bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch @@ -0,0 +1,201 @@ +From 4fe757d176060089e46f76d66ef20918b65e1f7f Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Tue, 20 Sep 2016 08:20:29 +0200 +Subject: [PATCH] squash bz1158805 Add support for qdevice/qnetd pro + +66e72fa18ddb lib: do not error out in "qdevice stop" if qdevice is stopped already + +788407652f58 lib: fix removing qdevice from a cluster +--- + pcs/common/report_codes.py | 1 + + pcs/lib/commands/qdevice.py | 22 +++++++++++++++------- + pcs/lib/commands/quorum.py | 7 +++---- + pcs/lib/corosync/qdevice_net.py | 32 +++++++++++++++++++++++++------- + pcs/lib/reports.py | 13 +++++++++++++ + 5 files changed, 57 insertions(+), 18 deletions(-) + +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index 23e931f..9b05951 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -134,6 +134,7 @@ QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR" + QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS" + QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED" + QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED" ++QDEVICE_NOT_RUNNING = "QDEVICE_NOT_RUNNING" + QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED" + QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED" + QDEVICE_USED_BY_CLUSTERS = "QDEVICE_USED_BY_CLUSTERS" +diff --git a/pcs/lib/commands/qdevice.py 
b/pcs/lib/commands/qdevice.py +index ca0ae86..119c51d 100644 +--- a/pcs/lib/commands/qdevice.py ++++ b/pcs/lib/commands/qdevice.py +@@ -61,11 +61,16 @@ def qdevice_status_text(lib_env, model, verbose=False, cluster=None): + _ensure_not_cman(lib_env) + _check_model(model) + runner = lib_env.cmd_runner() +- return ( +- qdevice_net.qdevice_status_generic_text(runner, verbose) +- + +- qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose) +- ) ++ try: ++ return ( ++ qdevice_net.qdevice_status_generic_text(runner, verbose) ++ + ++ qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose) ++ ) ++ except qdevice_net.QnetdNotRunningException: ++ raise LibraryError( ++ reports.qdevice_not_running(model) ++ ) + + def qdevice_enable(lib_env, model): + """ +@@ -196,8 +201,11 @@ def _check_qdevice_not_used(reporter, runner, model, force=False): + _check_model(model) + connected_clusters = [] + if model == "net": +- status = qdevice_net.qdevice_status_cluster_text(runner) +- connected_clusters = qdevice_net.qdevice_connected_clusters(status) ++ try: ++ status = qdevice_net.qdevice_status_cluster_text(runner) ++ connected_clusters = qdevice_net.qdevice_connected_clusters(status) ++ except qdevice_net.QnetdNotRunningException: ++ pass + if connected_clusters: + reporter.process(reports.qdevice_used_by_clusters( + connected_clusters, +diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py +index 8390fc6..aa98e61 100644 +--- a/pcs/lib/commands/quorum.py ++++ b/pcs/lib/commands/quorum.py +@@ -285,6 +285,7 @@ def remove_device(lib_env, skip_offline_nodes=False): + cfg.remove_quorum_device() + + if lib_env.is_corosync_conf_live: ++ communicator = lib_env.node_communicator() + # fix quorum options for SBD to work properly + if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg): + lib_env.report_processor.process(reports.sbd_requires_atb()) +@@ -292,10 +293,6 @@ def remove_device(lib_env, skip_offline_nodes=False): + lib_env.report_processor, 
{"auto_tie_breaker": "1"} + ) + +- lib_env.push_corosync_conf(cfg, skip_offline_nodes) +- +- if lib_env.is_corosync_conf_live: +- communicator = lib_env.node_communicator() + # disable qdevice + lib_env.report_processor.process( + reports.service_disable_started("corosync-qdevice") +@@ -330,6 +327,8 @@ def remove_device(lib_env, skip_offline_nodes=False): + skip_offline_nodes + ) + ++ lib_env.push_corosync_conf(cfg, skip_offline_nodes) ++ + def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes): + """ + remove configuration used by qdevice model net +diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py +index 200e45a..fa44923 100644 +--- a/pcs/lib/corosync/qdevice_net.py ++++ b/pcs/lib/corosync/qdevice_net.py +@@ -35,6 +35,9 @@ __qdevice_certutil = os.path.join( + "corosync-qdevice-net-certutil" + ) + ++class QnetdNotRunningException(Exception): ++ pass ++ + def qdevice_setup(runner): + """ + initialize qdevice on local host +@@ -79,10 +82,10 @@ def qdevice_status_generic_text(runner, verbose=False): + get qdevice runtime status in plain text + bool verbose get more detailed output + """ +- cmd = [__qnetd_tool, "-s"] ++ args = ["-s"] + if verbose: +- cmd.append("-v") +- stdout, stderr, retval = runner.run(cmd) ++ args.append("-v") ++ stdout, stderr, retval = _qdevice_run_tool(runner, args) + if retval != 0: + raise LibraryError( + reports.qdevice_get_status_error( +@@ -98,12 +101,12 @@ def qdevice_status_cluster_text(runner, cluster=None, verbose=False): + bool verbose get more detailed output + string cluster show information only about specified cluster + """ +- cmd = [__qnetd_tool, "-l"] ++ args = ["-l"] + if verbose: +- cmd.append("-v") ++ args.append("-v") + if cluster: +- cmd.extend(["-c", cluster]) +- stdout, stderr, retval = runner.run(cmd) ++ args.extend(["-c", cluster]) ++ stdout, stderr, retval = _qdevice_run_tool(runner, args) + if retval != 0: + raise LibraryError( + reports.qdevice_get_status_error( +@@ 
-114,6 +117,10 @@ def qdevice_status_cluster_text(runner, cluster=None, verbose=False): + return stdout + + def qdevice_connected_clusters(status_cluster_text): ++ """ ++ parse qnetd cluster status listing and return connected clusters' names ++ string status_cluster_text output of corosync-qnetd-tool -l ++ """ + connected_clusters = [] + regexp = re.compile(r'^Cluster "(?P<cluster>[^"]+)":$') + for line in status_cluster_text.splitlines(): +@@ -122,6 +129,17 @@ def qdevice_connected_clusters(status_cluster_text): + connected_clusters.append(match.group("cluster")) + return connected_clusters + ++def _qdevice_run_tool(runner, args): ++ """ ++ run corosync-qnetd-tool, raise QnetdNotRunningException if qnetd not running ++ CommandRunner runner ++ iterable args corosync-qnetd-tool arguments ++ """ ++ stdout, stderr, retval = runner.run([__qnetd_tool] + args) ++ if retval == 3 and "is qnetd running?" in stderr.lower(): ++ raise QnetdNotRunningException() ++ return stdout, stderr, retval ++ + def qdevice_enable(runner): + """ + make qdevice start automatically on boot on local host +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index b9e9a66..cff491c 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -842,6 +842,19 @@ def qdevice_destroy_error(model, reason): + } + ) + ++def qdevice_not_running(model): ++ """ ++ qdevice is expected to be running but is not running ++ string model qdevice model ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_NOT_RUNNING, ++ "Quorum device '{model}' is not running", ++ info={ ++ "model": model, ++ } ++ ) ++ + def qdevice_get_status_error(model, reason): + """ + unable to get runtime status of qdevice +-- +1.8.3.1 + diff --git a/SOURCES/bz1164402-01-sbd-fix-call_node-calls-on-python3.patch b/SOURCES/bz1164402-01-sbd-fix-call_node-calls-on-python3.patch new file mode 100644 index 0000000..454e32a --- /dev/null +++ b/SOURCES/bz1164402-01-sbd-fix-call_node-calls-on-python3.patch @@ -0,0 +1,129 @@ +From 
dff92f778f692f0ec2aa7d0c20e76a06a767e4b2 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Fri, 24 Jun 2016 20:17:38 +0200 +Subject: [PATCH] bz1164402-01 sbd fix call_node calls on python3 + +--- + pcs/lib/sbd.py | 14 +++++++------- + pcs/test/test_lib_sbd.py | 10 +++++----- + 2 files changed, 12 insertions(+), 12 deletions(-) + +diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py +index 1330bfc..4488a73 100644 +--- a/pcs/lib/sbd.py ++++ b/pcs/lib/sbd.py +@@ -57,7 +57,7 @@ def check_sbd(communicator, node, watchdog): + return communicator.call_node( + node, + "remote/check_sbd", +- NodeCommunicator.format_data_dict({"watchdog": watchdog}) ++ NodeCommunicator.format_data_dict([("watchdog", watchdog)]) + ) + + +@@ -119,7 +119,7 @@ def set_sbd_config(communicator, node, config): + communicator.call_node( + node, + "remote/set_sbd_config", +- NodeCommunicator.format_data_dict({"config": config}) ++ NodeCommunicator.format_data_dict([("config", config)]) + ) + + +@@ -171,7 +171,7 @@ def enable_sbd_service(communicator, node): + communicator -- NodeCommunicator + node -- NodeAddresses + """ +- communicator.call_node(node, "remote/sbd_enable", "") ++ communicator.call_node(node, "remote/sbd_enable", None) + + + def enable_sbd_service_on_node(report_processor, node_communicator, node): +@@ -215,7 +215,7 @@ def disable_sbd_service(communicator, node): + communicator -- NodeCommunicator + node -- NodeAddresses + """ +- communicator.call_node(node, "remote/sbd_disable", "") ++ communicator.call_node(node, "remote/sbd_disable", None) + + + def disable_sbd_service_on_node(report_processor, node_communicator, node): +@@ -259,7 +259,7 @@ def set_stonith_watchdog_timeout_to_zero(communicator, node): + node -- NodeAddresses + """ + communicator.call_node( +- node, "remote/set_stonith_watchdog_timeout_to_zero", "" ++ node, "remote/set_stonith_watchdog_timeout_to_zero", None + ) + + +@@ -292,7 +292,7 @@ def remove_stonith_watchdog_timeout(communicator, node): + 
communicator -- NodeCommunicator + node -- NodeAddresses + """ +- communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", "") ++ communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", None) + + + def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list): +@@ -351,7 +351,7 @@ def get_sbd_config(communicator, node): + communicator -- NodeCommunicator + node -- NodeAddresses + """ +- return communicator.call_node(node, "remote/get_sbd_config", "") ++ return communicator.call_node(node, "remote/get_sbd_config", None) + + + def is_sbd_enabled(runner): +diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py +index 54c5669..e3c1401 100644 +--- a/pcs/test/test_lib_sbd.py ++++ b/pcs/test/test_lib_sbd.py +@@ -360,7 +360,7 @@ class EnableSbdServiceTest(TestCase): + node = NodeAddresses("node1") + lib_sbd.enable_sbd_service(mock_communicator, node) + mock_communicator.call_node.assert_called_once_with( +- node, "remote/sbd_enable", "" ++ node, "remote/sbd_enable", None + ) + + +@@ -408,7 +408,7 @@ class DisableSbdServiceTest(TestCase): + node = NodeAddresses("node1") + lib_sbd.disable_sbd_service(mock_communicator, node) + mock_communicator.call_node.assert_called_once_with( +- node, "remote/sbd_disable", "" ++ node, "remote/sbd_disable", None + ) + + +@@ -456,7 +456,7 @@ class SetStonithWatchdogTimeoutToZeroTest(TestCase): + node = NodeAddresses("node1") + lib_sbd.set_stonith_watchdog_timeout_to_zero(mock_communicator, node) + mock_communicator.call_node.assert_called_once_with( +- node, "remote/set_stonith_watchdog_timeout_to_zero", "" ++ node, "remote/set_stonith_watchdog_timeout_to_zero", None + ) + + +@@ -520,7 +520,7 @@ class RemoveStonithWatchdogTimeoutTest(TestCase): + node = NodeAddresses("node1") + lib_sbd.remove_stonith_watchdog_timeout(mock_communicator, node) + mock_communicator.call_node.assert_called_once_with( +- node, "remote/remove_stonith_watchdog_timeout", "" ++ node, 
"remote/remove_stonith_watchdog_timeout", None + ) + + +@@ -584,7 +584,7 @@ class GetSbdConfigTest(TestCase): + node = NodeAddresses("node1") + lib_sbd.get_sbd_config(mock_communicator, node) + mock_communicator.call_node.assert_called_once_with( +- node, "remote/get_sbd_config", "" ++ node, "remote/get_sbd_config", None + ) + + +-- +1.8.3.1 + diff --git a/SOURCES/bz1164402-02-sbd-fixes.patch b/SOURCES/bz1164402-02-sbd-fixes.patch new file mode 100644 index 0000000..aac71ce --- /dev/null +++ b/SOURCES/bz1164402-02-sbd-fixes.patch @@ -0,0 +1,1621 @@ +From e43d7324b9fc6933d8fa431e66c6236721724b98 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Fri, 19 Aug 2016 02:57:38 +0200 +Subject: [PATCH] squash bz1164402 Support for sbd configuration is + +f2da8ad476c3 fix disable_service + +9367e7162b7b fix code formating + +57b618777d14 test: fix tests with parallel operations in SBD + +f7b9fc15072c sbd: change error message + +0dbdff4628d5 sbd: fix setting watchdog in config + +1c5ccd3be588 sbd: ban changing sbd option SBD_PACEMAKER + +733e28337589 sbd: add validation for SBD_WATCHDOG_TIMEOUT option + +9951f3262ef1 docs: add watchdog option to node add command + +d79592e05158 lib: fix disabling service on systemd systems + +17e4c5838842 sbd: set auto_tie_breaker whenever it is needed for SBD to work + +1ed4c2e3bc38 lib: fix enabled ATB in corosync.conf detection +--- + pcs/cluster.py | 54 +++++- + pcs/common/report_codes.py | 4 +- + pcs/lib/commands/quorum.py | 41 ++++- + pcs/lib/commands/sbd.py | 30 ++- + pcs/lib/corosync/config_facade.py | 15 +- + pcs/lib/external.py | 4 +- + pcs/lib/reports.py | 27 +++ + pcs/lib/sbd.py | 78 +++++++- + pcs/pcs.8 | 4 +- + pcs/quorum.py | 3 +- + pcs/stonith.py | 4 +- + pcs/test/test_lib_commands_quorum.py | 205 ++++++++++++++++++++- + pcs/test/test_lib_commands_sbd.py | 134 +++++++++++++- + pcs/test/test_lib_corosync_config_facade.py | 28 +++ + pcs/test/test_lib_external.py | 22 ++- + pcs/test/test_lib_sbd.py | 272 
+++++++++++++++++++++++++++- + pcs/usage.py | 3 + + pcs/utils.py | 25 ++- + pcsd/pcs.rb | 10 +- + 19 files changed, 908 insertions(+), 55 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 90fec63..577e08e 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -43,6 +43,7 @@ from pcs.lib import ( + reports as lib_reports, + ) + from pcs.lib.booth import sync as booth_sync ++from pcs.lib.nodes_task import check_corosync_offline_on_nodes + from pcs.lib.commands.quorum import _add_device_model_net + from pcs.lib.corosync import ( + config_parser as corosync_conf_utils, +@@ -1328,6 +1329,36 @@ def get_cib(argv): + except IOError as e: + utils.err("Unable to write to file '%s', %s" % (filename, e.strerror)) + ++ ++def _ensure_cluster_is_offline_if_atb_should_be_enabled( ++ lib_env, node_num_modifier, skip_offline_nodes=False ++): ++ """ ++ Check if cluster is offline if auto tie breaker should be enabled. ++ Raises LibraryError if ATB needs to be enabled cluster is not offline. ++ ++ lib_env -- LibraryEnvironment ++ node_num_modifier -- number which wil be added to the number of nodes in ++ cluster when determining whenever ATB is needed. ++ skip_offline_nodes -- if True offline nodes will be skipped ++ """ ++ corosync_conf = lib_env.get_corosync_conf() ++ if lib_sbd.atb_has_to_be_enabled( ++ lib_env.cmd_runner(), corosync_conf, node_num_modifier ++ ): ++ print( ++ "Warning: auto_tie_breaker quorum option will be enabled to make " ++ "SBD fencing effecive after this change. Cluster has to be offline " ++ "to be able to make this change." 
++ ) ++ check_corosync_offline_on_nodes( ++ lib_env.node_communicator(), ++ lib_env.report_processor, ++ corosync_conf.get_nodes(), ++ skip_offline_nodes ++ ) ++ ++ + def cluster_node(argv): + if len(argv) != 2: + usage.cluster() +@@ -1363,6 +1394,9 @@ def cluster_node(argv): + msg += ", use --force to override" + utils.err(msg) + ++ lib_env = utils.get_lib_env() ++ modifiers = utils.get_modificators() ++ + if add_node == True: + wait = False + wait_timeout = None +@@ -1385,11 +1419,9 @@ def cluster_node(argv): + if not canAdd: + utils.err("Unable to add '%s' to cluster: %s" % (node0, error)) + +- lib_env = utils.get_lib_env() + report_processor = lib_env.report_processor + node_communicator = lib_env.node_communicator() + node_addr = NodeAddresses(node0, node1) +- modifiers = utils.get_modificators() + try: + if lib_sbd.is_sbd_enabled(utils.cmd_runner()): + if "--watchdog" not in utils.pcs_options: +@@ -1400,6 +1432,10 @@ def cluster_node(argv): + else: + watchdog = utils.pcs_options["--watchdog"][0] + ++ _ensure_cluster_is_offline_if_atb_should_be_enabled( ++ lib_env, 1, modifiers["skip_offline_nodes"] ++ ) ++ + report_processor.process(lib_reports.sbd_check_started()) + lib_sbd.check_sbd_on_node( + report_processor, node_communicator, node_addr, watchdog +@@ -1407,12 +1443,15 @@ def cluster_node(argv): + sbd_cfg = environment_file_to_dict( + lib_sbd.get_local_sbd_config() + ) +- sbd_cfg["SBD_WATCHDOG_DEV"] = watchdog + report_processor.process( + lib_reports.sbd_config_distribution_started() + ) + lib_sbd.set_sbd_config_on_node( +- report_processor, node_communicator, node_addr, sbd_cfg ++ report_processor, ++ node_communicator, ++ node_addr, ++ sbd_cfg, ++ watchdog + ) + report_processor.process(lib_reports.sbd_enabling_started()) + lib_sbd.enable_sbd_service_on_node( +@@ -1549,6 +1588,13 @@ def cluster_node(argv): + ) + # else the node seems to be stopped already, we're ok to proceed + ++ try: ++ _ensure_cluster_is_offline_if_atb_should_be_enabled( ++ lib_env, 
-1, modifiers["skip_offline_nodes"] ++ ) ++ except LibraryError as e: ++ utils.process_library_reports(e.args) ++ + nodesRemoved = False + c_nodes = utils.getNodesFromCorosyncConf() + destroy_cluster([node0], keep_going=("--force" in utils.pcs_options)) +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index e71d418..672c2e3 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -10,9 +10,9 @@ FORCE_ACTIVE_RRP = "ACTIVE_RRP" + FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE" + FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB" + FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY" +-FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE" + FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE" + FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE" ++FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE" + FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD" + FORCE_OPTIONS = "OPTIONS" + FORCE_QDEVICE_MODEL = "QDEVICE_MODEL" +@@ -81,6 +81,7 @@ COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED" + COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR" + COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE" + COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE" ++COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD = "COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD" + COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR" + COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR" + COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE" +@@ -179,5 +180,6 @@ UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG" + UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS" + UNKNOWN_COMMAND = 'UNKNOWN_COMMAND' + UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT' ++WATCHDOG_INVALID = "WATCHDOG_INVALID" + UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS" + 
WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND" +diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py +index 7425e78..7fb7bb4 100644 +--- a/pcs/lib/commands/quorum.py ++++ b/pcs/lib/commands/quorum.py +@@ -5,8 +5,9 @@ from __future__ import ( + unicode_literals, + ) + +-from pcs.lib import reports +-from pcs.lib.errors import LibraryError ++from pcs.common import report_codes ++from pcs.lib import reports, sbd ++from pcs.lib.errors import LibraryError, ReportItemSeverity + from pcs.lib.corosync import ( + live as corosync_live, + qdevice_net, +@@ -39,16 +40,50 @@ def get_config(lib_env): + "device": device, + } + +-def set_options(lib_env, options, skip_offline_nodes=False): ++ ++def _check_if_atb_can_be_disabled( ++ runner, report_processor, corosync_conf, was_enabled, force=False ++): ++ """ ++ Check whenever auto_tie_breaker can be changed without affecting SBD. ++ Raises LibraryError if change of ATB will affect SBD functionality. ++ ++ runner -- CommandRunner ++ report_processor -- report processor ++ corosync_conf -- corosync conf facade ++ was_enabled -- True if ATB was enabled, False otherwise ++ force -- force change ++ """ ++ if ( ++ was_enabled ++ and ++ not corosync_conf.is_enabled_auto_tie_breaker() ++ and ++ sbd.is_auto_tie_breaker_needed(runner, corosync_conf) ++ ): ++ report_processor.process(reports.quorum_cannot_disable_atb_due_to_sbd( ++ ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR, ++ None if force else report_codes.FORCE_OPTIONS ++ )) ++ ++ ++def set_options(lib_env, options, skip_offline_nodes=False, force=False): + """ + Set corosync quorum options, distribute and reload corosync.conf if live + lib_env LibraryEnvironment + options quorum options (dict) + skip_offline_nodes continue even if not all nodes are accessible ++ bool force force changes + """ + __ensure_not_cman(lib_env) + cfg = lib_env.get_corosync_conf() ++ atb_enabled = cfg.is_enabled_auto_tie_breaker() + 
cfg.set_quorum_options(lib_env.report_processor, options) ++ if lib_env.is_corosync_conf_live: ++ _check_if_atb_can_be_disabled( ++ lib_env.cmd_runner(), lib_env.report_processor, ++ cfg, atb_enabled, force ++ ) + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + + def status_text(lib_env): +diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py +index 875758f..265ebb5 100644 +--- a/pcs/lib/commands/sbd.py ++++ b/pcs/lib/commands/sbd.py +@@ -5,6 +5,7 @@ from __future__ import ( + unicode_literals, + ) + ++import os + import json + + from pcs import settings +@@ -44,7 +45,9 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False): + """ + + report_item_list = [] +- unsupported_sbd_option_list = ["SBD_WATCHDOG_DEV", "SBD_OPTS"] ++ unsupported_sbd_option_list = [ ++ "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER" ++ ] + allowed_sbd_options = [ + "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT" + ] +@@ -62,6 +65,17 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False): + Severities.WARNING if allow_unknown_opts else Severities.ERROR, + None if allow_unknown_opts else report_codes.FORCE_OPTIONS + )) ++ if "SBD_WATCHDOG_TIMEOUT" in sbd_config: ++ report_item = reports.invalid_option_value( ++ "SBD_WATCHDOG_TIMEOUT", ++ sbd_config["SBD_WATCHDOG_TIMEOUT"], ++ "nonnegative integer" ++ ) ++ try: ++ if int(sbd_config["SBD_WATCHDOG_TIMEOUT"]) < 0: ++ report_item_list.append(report_item) ++ except (ValueError, TypeError): ++ report_item_list.append(report_item) + + return report_item_list + +@@ -81,6 +95,9 @@ def _get_full_watchdog_list(node_list, default_watchdog, watchdog_dict): + report_item_list = [] + + for node_name, watchdog in watchdog_dict.items(): ++ if not watchdog or not os.path.isabs(watchdog): ++ report_item_list.append(reports.invalid_watchdog_path(watchdog)) ++ continue + try: + full_dict[node_list.find_by_label(node_name)] = watchdog + except NodeNotFound: +@@ -140,6 +157,14 @@ def enable_sbd( + full_watchdog_dict + 
) + ++ # enable ATB if needed ++ corosync_conf = lib_env.get_corosync_conf() ++ if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), corosync_conf): ++ corosync_conf.set_quorum_options( ++ lib_env.report_processor, {"auto_tie_breaker": "1"} ++ ) ++ lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes) ++ + # distribute SBD configuration + config = sbd.get_default_sbd_config() + config.update(sbd_options) +@@ -147,7 +172,8 @@ def enable_sbd( + lib_env.report_processor, + lib_env.node_communicator(), + online_nodes, +- config ++ config, ++ full_watchdog_dict + ) + + # remove cluster prop 'stonith_watchdog_timeout' +diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py +index 600a89b..be621c0 100644 +--- a/pcs/lib/corosync/config_facade.py ++++ b/pcs/lib/corosync/config_facade.py +@@ -129,6 +129,16 @@ class ConfigFacade(object): + options[name] = value + return options + ++ def is_enabled_auto_tie_breaker(self): ++ """ ++ Returns True if auto tie braker option is enabled, False otherwise. 
++ """ ++ auto_tie_breaker = "0" ++ for quorum in self.config.get_sections("quorum"): ++ for attr in quorum.get_attributes("auto_tie_breaker"): ++ auto_tie_breaker = attr[1] ++ return auto_tie_breaker == "1" ++ + def __validate_quorum_options(self, options): + report_items = [] + has_qdevice = self.has_quorum_device() +@@ -488,10 +498,7 @@ class ConfigFacade(object): + # get relevant status + has_quorum_device = self.has_quorum_device() + has_two_nodes = len(self.get_nodes()) == 2 +- auto_tie_breaker = False +- for quorum in self.config.get_sections("quorum"): +- for attr in quorum.get_attributes("auto_tie_breaker"): +- auto_tie_breaker = attr[1] != "0" ++ auto_tie_breaker = self.is_enabled_auto_tie_breaker() + # update two_node + if has_two_nodes and not auto_tie_breaker and not has_quorum_device: + quorum_section_list = self.__ensure_section(self.config, "quorum") +diff --git a/pcs/lib/external.py b/pcs/lib/external.py +index 25e071f..08bf2bb 100644 +--- a/pcs/lib/external.py ++++ b/pcs/lib/external.py +@@ -135,13 +135,13 @@ def disable_service(runner, service, instance=None): + instance -- instance name, it ha no effect on not systemd systems. + If None no instance name will be used. 
+ """ ++ if not is_service_installed(runner, service): ++ return + if is_systemctl(): + output, retval = runner.run([ + "systemctl", "disable", _get_service_name(service, instance) + ]) + else: +- if not is_service_installed(runner, service): +- return + output, retval = runner.run(["chkconfig", service, "off"]) + if retval != 0: + raise DisableServiceError(service, output.rstrip(), instance) +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index eac95c7..568bb7e 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -1701,6 +1701,19 @@ def watchdog_not_found(node, watchdog): + ) + + ++def invalid_watchdog_path(watchdog): ++ """ ++ watchdog path is not absolut path ++ ++ watchdog -- watchdog device path ++ """ ++ return ReportItem.error( ++ report_codes.WATCHDOG_INVALID, ++ "Watchdog path '{watchdog}' is invalid.", ++ info={"watchdog": watchdog} ++ ) ++ ++ + def unable_to_get_sbd_status(node, reason): + """ + there was (communication or parsing) failure during obtaining status of SBD +@@ -1901,3 +1914,17 @@ def live_environment_required(forbidden_options): + "options_string": ", ".join(forbidden_options), + } + ) ++ ++ ++def quorum_cannot_disable_atb_due_to_sbd( ++ severity=ReportItemSeverity.ERROR, forceable=None ++): ++ """ ++ Quorum option auto_tie_breaker cannot be disbled due to SBD. ++ """ ++ return ReportItem( ++ report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD, ++ severity, ++ "unable to disable auto_tie_breaker: SBD fencing will have no effect", ++ forceable=forceable ++ ) +diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py +index 4488a73..c9f013b 100644 +--- a/pcs/lib/sbd.py ++++ b/pcs/lib/sbd.py +@@ -46,6 +46,50 @@ def _run_parallel_and_raise_lib_error_on_failure(func, param_list): + raise LibraryError(*report_list) + + ++def is_auto_tie_breaker_needed( ++ runner, corosync_conf_facade, node_number_modifier=0 ++): ++ """ ++ Returns True whenever quorum option auto tie breaker is needed to be enabled ++ for proper working of SBD fencing. 
False if it is not needed. ++ ++ runner -- command runner ++ corosync_conf_facade -- ++ node_number_modifier -- this value vill be added to current number of nodes. ++ This can be useful to test whenever is ATB needed when adding/removeing ++ node. ++ """ ++ return ( ++ not corosync_conf_facade.has_quorum_device() ++ and ++ (len(corosync_conf_facade.get_nodes()) + node_number_modifier) % 2 == 0 ++ and ++ is_sbd_installed(runner) ++ and ++ is_sbd_enabled(runner) ++ ) ++ ++def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0): ++ """ ++ Return True whenever quorum option auto tie breaker has to be enabled for ++ proper working of SBD fencing. False if it's not needed or it is already ++ enabled. ++ ++ runner -- command runner ++ corosync_conf_facade -- ++ node_number_modifier -- this value vill be added to current number of nodes. ++ This can be useful to test whenever is ATB needed when adding/removeing ++ node. ++ """ ++ return ( ++ is_auto_tie_breaker_needed( ++ runner, corosync_conf_facade, node_number_modifier ++ ) ++ and ++ not corosync_conf_facade.is_enabled_auto_tie_breaker() ++ ) ++ ++ + def check_sbd(communicator, node, watchdog): + """ + Check SBD on specified 'node' and existence of specified watchdog. +@@ -123,18 +167,23 @@ def set_sbd_config(communicator, node, config): + ) + + +-def set_sbd_config_on_node(report_processor, node_communicator, node, config): ++def set_sbd_config_on_node( ++ report_processor, node_communicator, node, config, watchdog ++): + """ +- Send SBD configuration to 'node'. Also puts correct node name into +- SBD_OPTS option (SBD_OPTS="-n <node_name>"). ++ Send SBD configuration to 'node' with specified watchdog set. Also puts ++ correct node name into SBD_OPTS option (SBD_OPTS="-n <node_name>"). 
+ + report_processor -- + node_communicator -- NodeCommunicator + node -- NodeAddresses + config -- dictionary in format: <SBD config option>: <value> ++ watchdog -- path to watchdog device + """ + config = dict(config) + config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node.label) ++ if watchdog: ++ config["SBD_WATCHDOG_DEV"] = watchdog + set_sbd_config(node_communicator, node, dict_to_environment_file(config)) + report_processor.process( + reports.sbd_config_accepted_by_node(node.label) +@@ -142,7 +191,7 @@ def set_sbd_config_on_node(report_processor, node_communicator, node, config): + + + def set_sbd_config_on_all_nodes( +- report_processor, node_communicator, node_list, config ++ report_processor, node_communicator, node_list, config, watchdog_dict + ): + """ + Send SBD configuration 'config' to all nodes in 'node_list'. Option +@@ -153,12 +202,20 @@ def set_sbd_config_on_all_nodes( + node_communicator -- NodeCommunicator + node_list -- NodeAddressesList + config -- dictionary in format: <SBD config option>: <value> ++ watchdog_dict -- dictionary of watchdogs where key is NodeAdresses object ++ and value is path to watchdog + """ + report_processor.process(reports.sbd_config_distribution_started()) + _run_parallel_and_raise_lib_error_on_failure( + set_sbd_config_on_node, + [ +- ([report_processor, node_communicator, node, config], {}) ++ ( ++ [ ++ report_processor, node_communicator, node, config, ++ watchdog_dict.get(node) ++ ], ++ {} ++ ) + for node in node_list + ] + ) +@@ -362,3 +419,14 @@ def is_sbd_enabled(runner): + runner -- CommandRunner + """ + return external.is_service_enabled(runner, "sbd") ++ ++ ++def is_sbd_installed(runner): ++ """ ++ Check if SBD service is installed in local system. ++ Reurns True id SBD service is installed. False otherwise. 
++ ++ runner -- CommandRunner ++ """ ++ return external.is_service_installed(runner, "sbd") ++ +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 9064054..7a054ca 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -271,8 +271,8 @@ Upgrade the CIB to conform to the latest version of the document schema. + edit [scope=<scope> | \fB\-\-config\fR] + Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving. Specify scope to edit a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults. \fB\-\-config\fR is the same as scope=configuration. Use of \fB\-\-config\fR is recommended. Do not specify a scope if you need to edit the whole CIB or be warned in the case of outdated CIB. + .TP +-node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR] +-Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node. If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is sepcified wait up to 'n' seconds for the new node to start. If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node. When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address. ++node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR] [\fB\-\-watchdog\fR=<watchdog\-path>] ++Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node. If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is sepcified wait up to 'n' seconds for the new node to start. If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node. 
When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address. Use \fB\-\-watchdog\fR to specify path to watchdog on newly added node, when SBD is enabled in cluster. + .TP + node remove <node> + Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster. +diff --git a/pcs/quorum.py b/pcs/quorum.py +index 1c2d41d..6cd06ca 100644 +--- a/pcs/quorum.py ++++ b/pcs/quorum.py +@@ -121,7 +121,8 @@ def quorum_update_cmd(lib, argv, modificators): + + lib.quorum.set_options( + options, +- skip_offline_nodes=modificators["skip_offline_nodes"] ++ skip_offline_nodes=modificators["skip_offline_nodes"], ++ force=modificators["force"] + ) + + def quorum_device_add_cmd(lib, argv, modificators): +diff --git a/pcs/stonith.py b/pcs/stonith.py +index 93332ef..23f3800 100644 +--- a/pcs/stonith.py ++++ b/pcs/stonith.py +@@ -495,7 +495,7 @@ def _sbd_parse_watchdogs(watchdog_list): + for watchdog_node in watchdog_list: + if "@" not in watchdog_node: + if default_watchdog: +- raise CmdLineInputError("Multiple default watchdogs.") ++ raise CmdLineInputError("Multiple watchdog definitions.") + default_watchdog = watchdog_node + else: + watchdog, node_name = watchdog_node.rsplit("@", 1) +@@ -553,7 +553,7 @@ def sbd_config(lib, argv, modifiers): + + config = config_list[0]["config"] + +- filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS"] ++ filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"] + for key, val in config.items(): + if key in filtered_options: + continue +diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py +index 826251a..d286a8f 100644 +--- a/pcs/test/test_lib_commands_quorum.py ++++ b/pcs/test/test_lib_commands_quorum.py +@@ -25,6 +25,7 @@ from pcs.lib.errors import ( + LibraryError, + ReportItemSeverity as severity, + ) ++from pcs.lib.corosync.config_facade import ConfigFacade + from pcs.lib.external import 
NodeCommunicationException + from pcs.lib.node import NodeAddresses, NodeAddressesList + +@@ -146,23 +147,201 @@ class GetQuorumConfigTest(TestCase, CmanMixin): + self.assertEqual([], self.mock_reporter.report_item_list) + + ++@mock.patch("pcs.lib.sbd.is_auto_tie_breaker_needed") ++class CheckIfAtbCanBeDisabledTest(TestCase): ++ def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.mock_runner = "cmd_runner" ++ self.mock_corosync_conf = mock.MagicMock(spec_set=ConfigFacade) ++ ++ def test_atb_no_need_was_disabled_atb_disabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_no_need_was_disabled_atb_enabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_no_need_was_enable_atb_disabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_no_need_was_enabled_atb_enabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def 
test_atb_needed_was_disabled_atb_disabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_needed_was_disabled_atb_enabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_needed_was_enable_atb_disabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ report_item = ( ++ severity.ERROR, ++ report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD, ++ {}, ++ report_codes.FORCE_OPTIONS ++ ) ++ assert_raise_library_error( ++ lambda: lib._check_if_atb_can_be_disabled( ++ self.mock_runner, ++ self.mock_reporter, ++ self.mock_corosync_conf, ++ True ++ ), ++ report_item ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, [report_item] ++ ) ++ ++ def test_atb_needed_was_enabled_atb_enabled(self, mock_atb_needed): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ ++ def test_atb_no_need_was_disabled_atb_disabled_force( ++ self, mock_atb_needed ++ ): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, 
self.mock_corosync_conf, ++ False, force=True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_no_need_was_disabled_atb_enabled_force( ++ self, mock_atb_needed ++ ): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, ++ False, force=True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_no_need_was_enable_atb_disabled_force(self, mock_atb_needed): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True, ++ force=True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_no_need_was_enabled_atb_enabled_force(self, mock_atb_needed): ++ mock_atb_needed.return_value = False ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True, ++ force=True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_needed_was_disabled_atb_disabled_force( ++ self, mock_atb_needed ++ ): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, ++ False, force=True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_needed_was_disabled_atb_enabled_force(self, mock_atb_needed): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, ++ False, force=True ++ 
) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_atb_needed_was_enable_atb_disabled_force(self, mock_atb_needed): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True, ++ force=True ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ severity.WARNING, ++ report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD, ++ {}, ++ None ++ )] ++ ) ++ ++ def test_atb_needed_was_enabled_atb_enabled_force(self, mock_atb_needed): ++ mock_atb_needed.return_value = True ++ self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True ++ lib._check_if_atb_can_be_disabled( ++ self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True, ++ force=True ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ ++@mock.patch("pcs.lib.commands.quorum._check_if_atb_can_be_disabled") + @mock.patch.object(LibraryEnvironment, "push_corosync_conf") + @mock.patch.object(LibraryEnvironment, "get_corosync_conf_data") ++@mock.patch.object(LibraryEnvironment, "cmd_runner") + class SetQuorumOptionsTest(TestCase, CmanMixin): + def setUp(self): + self.mock_logger = mock.MagicMock(logging.Logger) + self.mock_reporter = MockLibraryReportProcessor() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) +- def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync): ++ def test_disabled_on_cman( ++ self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check ++ ): + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) + self.assert_disabled_on_cman(lambda: lib.set_options(lib_env, {})) + mock_get_corosync.assert_not_called() + mock_push_corosync.assert_not_called() ++ mock_check.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) + def 
test_enabled_on_cman_if_not_live( +- self, mock_get_corosync, mock_push_corosync ++ self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check + ): + original_conf = "invalid {\nconfig: stop after cman test" + mock_get_corosync.return_value = original_conf +@@ -182,11 +361,16 @@ class SetQuorumOptionsTest(TestCase, CmanMixin): + ) + + mock_push_corosync.assert_not_called() ++ mock_check.assert_not_called() ++ mock_runner.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_success(self, mock_get_corosync, mock_push_corosync): ++ def test_success( ++ self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check ++ ): + original_conf = open(rc("corosync-3nodes.conf")).read() + mock_get_corosync.return_value = original_conf ++ mock_runner.return_value = "cmd_runner" + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) + + new_options = {"wait_for_all": "1"} +@@ -201,9 +385,16 @@ class SetQuorumOptionsTest(TestCase, CmanMixin): + ) + ) + self.assertEqual([], self.mock_reporter.report_item_list) ++ self.assertEqual(1, mock_check.call_count) ++ self.assertEqual("cmd_runner", mock_check.call_args[0][0]) ++ self.assertEqual(self.mock_reporter, mock_check.call_args[0][1]) ++ self.assertFalse(mock_check.call_args[0][3]) ++ self.assertFalse(mock_check.call_args[0][4]) + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def test_bad_options(self, mock_get_corosync, mock_push_corosync): ++ def test_bad_options( ++ self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check ++ ): + original_conf = open(rc("corosync.conf")).read() + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -228,9 +419,12 @@ class SetQuorumOptionsTest(TestCase, CmanMixin): + ) + + mock_push_corosync.assert_not_called() ++ mock_check.assert_not_called() + + @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) +- def 
test_bad_config(self, mock_get_corosync, mock_push_corosync): ++ def test_bad_config( ++ self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check ++ ): + original_conf = "invalid {\nconfig: this is" + mock_get_corosync.return_value = original_conf + lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) +@@ -246,6 +440,7 @@ class SetQuorumOptionsTest(TestCase, CmanMixin): + ) + + mock_push_corosync.assert_not_called() ++ mock_check.assert_not_called() + + + @mock.patch("pcs.lib.commands.quorum.corosync_live.get_quorum_status_text") +diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py +index 9a96757..0663082 100644 +--- a/pcs/test/test_lib_commands_sbd.py ++++ b/pcs/test/test_lib_commands_sbd.py +@@ -35,6 +35,15 @@ from pcs.lib.external import ( + import pcs.lib.commands.sbd as cmd_sbd + + ++def _assert_equal_list_of_dictionaries_without_order(expected, actual): ++ for item in actual: ++ if item not in expected: ++ raise AssertionError("Given but not expected: {0}".format(item)) ++ for item in expected: ++ if item not in actual: ++ raise AssertionError("Expected but not given: {0}".format(item)) ++ ++ + class CommandSbdTest(TestCase): + def setUp(self): + self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment) +@@ -234,7 +243,8 @@ class ValidateSbdOptionsTest(TestCase): + "SBD_STARTMODE": "clean", + "SBD_WATCHDOG_DEV": "/dev/watchdog", + "SBD_UNKNOWN": "", +- "SBD_OPTS": " " ++ "SBD_OPTS": " ", ++ "SBD_PACEMAKER": "false", + } + + assert_report_item_list_equal( +@@ -272,6 +282,90 @@ class ValidateSbdOptionsTest(TestCase): + "allowed_str": self.allowed_sbd_options_str + }, + None ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_OPTION, ++ { ++ "option_name": "SBD_PACEMAKER", ++ "option_type": None, ++ "allowed": self.allowed_sbd_options, ++ "allowed_str": self.allowed_sbd_options_str ++ }, ++ None ++ ) ++ ] ++ ) ++ ++ def test_watchdog_timeout_is_not_present(self): ++ config = { ++ "SBD_DELAY_START": 
"yes", ++ "SBD_STARTMODE": "clean" ++ } ++ self.assertEqual([], cmd_sbd._validate_sbd_options(config)) ++ ++ def test_watchdog_timeout_is_nonnegative_int(self): ++ config = { ++ "SBD_WATCHDOG_TIMEOUT": "-1", ++ } ++ ++ assert_report_item_list_equal( ++ cmd_sbd._validate_sbd_options(config), ++ [ ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "SBD_WATCHDOG_TIMEOUT", ++ "option_value": "-1", ++ "allowed_values": "nonnegative integer", ++ "allowed_values_str": "nonnegative integer", ++ }, ++ None ++ ) ++ ] ++ ) ++ ++ def test_watchdog_timeout_is_not_int(self): ++ config = { ++ "SBD_WATCHDOG_TIMEOUT": "not int", ++ } ++ ++ assert_report_item_list_equal( ++ cmd_sbd._validate_sbd_options(config), ++ [ ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "SBD_WATCHDOG_TIMEOUT", ++ "option_value": "not int", ++ "allowed_values": "nonnegative integer", ++ "allowed_values_str": "nonnegative integer", ++ }, ++ None ++ ) ++ ] ++ ) ++ ++ def test_watchdog_timeout_is_none(self): ++ config = { ++ "SBD_WATCHDOG_TIMEOUT": None, ++ } ++ ++ assert_report_item_list_equal( ++ cmd_sbd._validate_sbd_options(config), ++ [ ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "SBD_WATCHDOG_TIMEOUT", ++ "option_value": None, ++ "allowed_values": "nonnegative integer", ++ "allowed_values_str": "nonnegative integer", ++ }, ++ None + ) + ] + ) +@@ -325,6 +419,35 @@ class GetFullWatchdogListTest(TestCase): + ) + ) + ++ def test_invalid_watchdogs(self): ++ watchdog_dict = { ++ self.node_list[1].label: "", ++ self.node_list[2].label: None, ++ self.node_list[3].label: "not/abs/path", ++ self.node_list[4].label: "/dev/watchdog" ++ ++ } ++ assert_raise_library_error( ++ lambda: cmd_sbd._get_full_watchdog_list( ++ self.node_list, "/dev/dog", watchdog_dict ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.WATCHDOG_INVALID, ++ {"watchdog": ""} ++ ), ++ ( ++ Severities.ERROR, ++ 
report_codes.WATCHDOG_INVALID, ++ {"watchdog": None} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.WATCHDOG_INVALID, ++ {"watchdog": "not/abs/path"} ++ ) ++ ) ++ + + @mock.patch("pcs.lib.commands.sbd._get_cluster_nodes") + @mock.patch("pcs.lib.sbd.check_sbd") +@@ -393,8 +516,7 @@ class GetClusterSbdStatusTest(CommandSbdTest): + } + } + ] +- +- self.assertEqual( ++ _assert_equal_list_of_dictionaries_without_order( + expected, cmd_sbd.get_cluster_sbd_status(self.mock_env) + ) + mock_get_nodes.assert_called_once_with(self.mock_env) +@@ -447,7 +569,7 @@ class GetClusterSbdStatusTest(CommandSbdTest): + } + ] + +- self.assertEqual( ++ _assert_equal_list_of_dictionaries_without_order( + expected, cmd_sbd.get_cluster_sbd_status(self.mock_env) + ) + mock_get_nodes.assert_called_once_with(self.mock_env) +@@ -538,7 +660,7 @@ OPTION= value + } + ] + +- self.assertEqual( ++ _assert_equal_list_of_dictionaries_without_order( + expected, cmd_sbd.get_cluster_sbd_config(self.mock_env) + ) + mock_get_nodes.assert_called_once_with(self.mock_env) +@@ -589,7 +711,7 @@ invalid value + } + ] + +- self.assertEqual( ++ _assert_equal_list_of_dictionaries_without_order( + expected, cmd_sbd.get_cluster_sbd_config(self.mock_env) + ) + mock_get_nodes.assert_called_once_with(self.mock_env) +diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py +index 4a35fd9..91f7b40 100644 +--- a/pcs/test/test_lib_corosync_config_facade.py ++++ b/pcs/test/test_lib_corosync_config_facade.py +@@ -281,6 +281,34 @@ quorum { + self.assertFalse(facade.need_qdevice_reload) + + ++class IsEnabledAutoTieBreaker(TestCase): ++ def test_enabled(self): ++ config = """\ ++quorum { ++ auto_tie_breaker: 1 ++} ++""" ++ facade = lib.ConfigFacade.from_string(config) ++ self.assertTrue(facade.is_enabled_auto_tie_breaker()) ++ ++ def test_disabled(self): ++ config = """\ ++quorum { ++ auto_tie_breaker: 0 ++} ++""" ++ facade = lib.ConfigFacade.from_string(config) ++ 
self.assertFalse(facade.is_enabled_auto_tie_breaker()) ++ ++ def test_no_value(self): ++ config = """\ ++quorum { ++} ++""" ++ facade = lib.ConfigFacade.from_string(config) ++ self.assertFalse(facade.is_enabled_auto_tie_breaker()) ++ ++ + class SetQuorumOptionsTest(TestCase): + def get_two_node(self, facade): + two_node = None +diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py +index a4ec0f9..b0ffdbb 100644 +--- a/pcs/test/test_lib_external.py ++++ b/pcs/test/test_lib_external.py +@@ -1012,12 +1012,14 @@ Copyright (c) 2006-2009 Red Hat, Inc. + + + @mock.patch("pcs.lib.external.is_systemctl") ++@mock.patch("pcs.lib.external.is_service_installed") + class DisableServiceTest(TestCase): + def setUp(self): + self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner) + self.service = "service_name" + +- def test_systemctl(self, mock_systemctl): ++ def test_systemctl(self, mock_is_installed, mock_systemctl): ++ mock_is_installed.return_value = True + mock_systemctl.return_value = True + self.mock_runner.run.return_value = ("", 0) + lib.disable_service(self.mock_runner, self.service) +@@ -1025,7 +1027,8 @@ class DisableServiceTest(TestCase): + ["systemctl", "disable", self.service + ".service"] + ) + +- def test_systemctl_failed(self, mock_systemctl): ++ def test_systemctl_failed(self, mock_is_installed, mock_systemctl): ++ mock_is_installed.return_value = True + mock_systemctl.return_value = True + self.mock_runner.run.return_value = ("", 1) + self.assertRaises( +@@ -1036,7 +1039,6 @@ class DisableServiceTest(TestCase): + ["systemctl", "disable", self.service + ".service"] + ) + +- @mock.patch("pcs.lib.external.is_service_installed") + def test_not_systemctl(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = False +@@ -1046,7 +1048,6 @@ class DisableServiceTest(TestCase): + ["chkconfig", self.service, "off"] + ) + +- @mock.patch("pcs.lib.external.is_service_installed") + def 
test_not_systemctl_failed(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = False +@@ -1059,7 +1060,14 @@ class DisableServiceTest(TestCase): + ["chkconfig", self.service, "off"] + ) + +- @mock.patch("pcs.lib.external.is_service_installed") ++ def test_systemctl_not_installed( ++ self, mock_is_installed, mock_systemctl ++ ): ++ mock_is_installed.return_value = False ++ mock_systemctl.return_value = True ++ lib.disable_service(self.mock_runner, self.service) ++ self.assertEqual(self.mock_runner.run.call_count, 0) ++ + def test_not_systemctl_not_installed( + self, mock_is_installed, mock_systemctl + ): +@@ -1068,7 +1076,8 @@ class DisableServiceTest(TestCase): + lib.disable_service(self.mock_runner, self.service) + self.assertEqual(self.mock_runner.run.call_count, 0) + +- def test_instance_systemctl(self, mock_systemctl): ++ def test_instance_systemctl(self, mock_is_installed, mock_systemctl): ++ mock_is_installed.return_value = True + mock_systemctl.return_value = True + self.mock_runner.run.return_value = ("", 0) + lib.disable_service(self.mock_runner, self.service, instance="test") +@@ -1078,7 +1087,6 @@ class DisableServiceTest(TestCase): + "{0}@{1}.service".format(self.service, "test") + ]) + +- @mock.patch("pcs.lib.external.is_service_installed") + def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl): + mock_is_installed.return_value = True + mock_systemctl.return_value = False +diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py +index e3c1401..fd29484 100644 +--- a/pcs/test/test_lib_sbd.py ++++ b/pcs/test/test_lib_sbd.py +@@ -28,6 +28,7 @@ from pcs.lib.external import ( + NodeConnectionException, + ) + import pcs.lib.sbd as lib_sbd ++from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade + + + class TestException(Exception): +@@ -85,6 +86,246 @@ class RunParallelAndRaiseLibErrorOnFailureTest(TestCase): + ) + + 
++@mock.patch("pcs.lib.sbd.is_sbd_installed") ++@mock.patch("pcs.lib.sbd.is_sbd_enabled") ++class IsAutoTieBreakerNeededTest(TestCase): ++ def setUp(self): ++ self.runner = "runner" ++ self.mock_corosync_conf = mock.MagicMock(spec_set=CorosyncConfigFacade) ++ ++ def _set_ret_vals(self, nodes, qdevice): ++ self.mock_corosync_conf.get_nodes.return_value = nodes ++ self.mock_corosync_conf.has_quorum_device.return_value = qdevice ++ ++ def test_sbd_enabled_even_nodes_has_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2], True) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_enabled_even_nodes_no_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2], False) ++ self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_not_installed_even_nodes_no_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = False ++ mock_installed.return_value = False ++ self._set_ret_vals([1, 2], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_enabled_odd_nodes_has_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], True) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_enabled_odd_nodes_no_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def 
test_sbd_disabled_even_nodes_has_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = False ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2], True) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_disabled_even_nodes_no_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = False ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_disabled_odd_nodes_has_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = False ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], True) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_disabled_odd_nodes_no_qdevice( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = False ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf ++ )) ++ ++ def test_sbd_enabled_odd_nodes_no_qdevice_plus_node( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], False) ++ self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, 1 ++ )) ++ ++ def test_sbd_not_installed_odd_nodes_no_qdevice_plus_node( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = False ++ mock_installed.return_value = False ++ self._set_ret_vals([1, 2, 3], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, 1 ++ )) ++ ++ def test_sbd_enabled_odd_nodes_no_qdevice_minus_node( ++ self, mock_enabled, mock_installed ++ ): ++ 
mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], False) ++ self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, -1 ++ )) ++ ++ def test_sbd_enabled_odd_nodes_no_qdevice_plus_2_nodes( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, 2 ++ )) ++ ++ def test_sbd_enabled_odd_nodes_no_qdevice_minus_2_nodes( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, -2 ++ )) ++ ++ def test_sbd_enabled_even_nodes_no_qdevice_plus_node( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, 1 ++ )) ++ ++ def test_sbd_enabled_even_nodes_no_qdevice_minus_node( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2], False) ++ self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, -1 ++ )) ++ ++ def test_sbd_enabled_even_nodes_no_qdevice_plus_2_nodes( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ mock_installed.return_value = True ++ self._set_ret_vals([1, 2], False) ++ self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, 2 ++ )) ++ ++ def test_sbd_enabled_even_nodes_no_qdevice_minus_2_nodes( ++ self, mock_enabled, mock_installed ++ ): ++ mock_enabled.return_value = True ++ 
mock_installed.return_value = True ++ self._set_ret_vals([1, 2, 3, 4], False) ++ self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( ++ self.runner, self.mock_corosync_conf, -2 ++ )) ++ ++ ++@mock.patch("pcs.lib.sbd.is_auto_tie_breaker_needed") ++class AtbHasToBeEnabledTest(TestCase): ++ def setUp(self): ++ self.mock_runner = "runner" ++ self.mock_conf = mock.MagicMock(spec_set=CorosyncConfigFacade) ++ ++ def test_atb_needed_is_enabled(self, mock_is_needed): ++ mock_is_needed.return_value = True ++ self.mock_conf.is_enabled_auto_tie_breaker.return_value = True ++ self.assertFalse(lib_sbd.atb_has_to_be_enabled( ++ self.mock_runner, self.mock_conf, 1 ++ )) ++ mock_is_needed.assert_called_once_with( ++ self.mock_runner, self.mock_conf, 1 ++ ) ++ ++ def test_atb_needed_is_disabled(self, mock_is_needed): ++ mock_is_needed.return_value = True ++ self.mock_conf.is_enabled_auto_tie_breaker.return_value = False ++ self.assertTrue(lib_sbd.atb_has_to_be_enabled( ++ self.mock_runner, self.mock_conf, -1 ++ )) ++ mock_is_needed.assert_called_once_with( ++ self.mock_runner, self.mock_conf, -1 ++ ) ++ ++ def test_atb_not_needed_is_enabled(self, mock_is_needed): ++ mock_is_needed.return_value = False ++ self.mock_conf.is_enabled_auto_tie_breaker.return_value = True ++ self.assertFalse(lib_sbd.atb_has_to_be_enabled( ++ self.mock_runner, self.mock_conf, 2 ++ )) ++ mock_is_needed.assert_called_once_with( ++ self.mock_runner, self.mock_conf, 2 ++ ) ++ ++ def test_atb_not_needed_is_disabled(self, mock_is_needed): ++ mock_is_needed.return_value = False ++ self.mock_conf.is_enabled_auto_tie_breaker.return_value = False ++ self.assertFalse(lib_sbd.atb_has_to_be_enabled( ++ self.mock_runner, self.mock_conf, -2 ++ )) ++ mock_is_needed.assert_called_once_with( ++ self.mock_runner, self.mock_conf, -2 ++ ) ++ ++ ++ + class CheckSbdTest(TestCase): + def test_success(self): + mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) +@@ -316,11 +557,11 @@ class 
SetSbdConfigOnNodeTest(TestCase): + } + cfg_out = """# This file has been generated by pcs. + SBD_OPTS="-n node1" +-SBD_WATCHDOG_DEV=/dev/watchdog ++SBD_WATCHDOG_DEV=/my/watchdog + SBD_WATCHDOG_TIMEOUT=0 + """ + lib_sbd.set_sbd_config_on_node( +- self.mock_rep, self.mock_com, self.node, cfg_in ++ self.mock_rep, self.mock_com, self.node, cfg_in, "/my/watchdog" + ) + mock_set_sbd_cfg.assert_called_once_with( + self.mock_com, self.node, cfg_out +@@ -340,17 +581,24 @@ class SetSbdConfigOnAllNodesTest(TestCase): + def test_success(self, mock_func): + mock_com = mock.MagicMock(spec_set=NodeCommunicator) + mock_rep = MockLibraryReportProcessor() +- node_list = [NodeAddresses("node" + str(i)) for i in range(5)] ++ watchdog_dict = dict([ ++ (NodeAddresses("node" + str(i)), "/dev/watchdog" + str(i)) ++ for i in range(5) ++ ]) ++ node_list = list(watchdog_dict.keys()) + config = { + "opt1": "val1", + "opt2": "val2" + } + lib_sbd.set_sbd_config_on_all_nodes( +- mock_rep, mock_com, node_list, config ++ mock_rep, mock_com, node_list, config, watchdog_dict + ) + mock_func.assert_called_once_with( + lib_sbd.set_sbd_config_on_node, +- [([mock_rep, mock_com, node, config], {}) for node in node_list] ++ [ ++ ([mock_rep, mock_com, node, config, watchdog_dict[node]], {}) ++ for node in node_list ++ ] + ) + + +@@ -594,3 +842,17 @@ class IsSbdEnabledTest(TestCase): + mock_obj = mock.MagicMock() + mock_is_service_enabled.return_value = True + self.assertTrue(lib_sbd.is_sbd_enabled(mock_obj)) ++ ++ ++@mock.patch("pcs.lib.external.is_service_installed") ++class IsSbdInstalledTest(TestCase): ++ def test_installed(self, mock_is_service_installed): ++ mock_obj = mock.MagicMock() ++ mock_is_service_installed.return_value = True ++ self.assertTrue(lib_sbd.is_sbd_installed(mock_obj)) ++ ++ def test_not_installed(self, mock_is_service_installed): ++ mock_obj = mock.MagicMock() ++ mock_is_service_installed.return_value = False ++ self.assertFalse(lib_sbd.is_sbd_installed(mock_obj)) ++ +diff --git 
a/pcs/usage.py b/pcs/usage.py +index b11a5fa..9ebbca9 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -683,6 +683,7 @@ Commands: + the whole CIB or be warned in the case of outdated CIB. + + node add <node[,node-altaddr]> [--start [--wait[=<n>]]] [--enable] ++ [--watchdog=<watchdog-path>] + Add the node to corosync.conf and corosync on all nodes in the cluster + and sync the new corosync.conf to the new node. If --start is + specified also start corosync/pacemaker on the new node, if --wait is +@@ -690,6 +691,8 @@ Commands: + is specified enable corosync/pacemaker on new node. + When using Redundant Ring Protocol (RRP) with udpu transport, specify + the ring 0 address first followed by a ',' and then the ring 1 address. ++ Use --watchdog to specify path to watchdog on newly added node, when SBD ++ is enabled in cluster. + + node remove <node> + Shutdown specified node and remove it from pacemaker and corosync on +diff --git a/pcs/utils.py b/pcs/utils.py +index 53cc0b0..a7ff7ca 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -32,7 +32,7 @@ from pcs.cli.common.reports import ( + LibraryReportProcessorToConsole as LibraryReportProcessorToConsole, + ) + from pcs.common.tools import simple_cache +-from pcs.lib import reports ++from pcs.lib import reports, sbd + from pcs.lib.env import LibraryEnvironment + from pcs.lib.errors import LibraryError + from pcs.lib.external import ( +@@ -574,6 +574,23 @@ def getCorosyncActiveNodes(): + + return nodes_active + ++ ++def _enable_auto_tie_breaker_for_sbd(corosync_conf): ++ """ ++ Enable auto tie breaker in specified corosync conf if it is needed by SBD. 
++ ++ corosync_conf -- parsed corosync conf ++ """ ++ try: ++ corosync_facade = corosync_conf_facade(corosync_conf) ++ if sbd.atb_has_to_be_enabled(cmd_runner(), corosync_facade): ++ corosync_facade.set_quorum_options( ++ get_report_processor(), {"auto_tie_breaker": "1"} ++ ) ++ except LibraryError as e: ++ process_library_reports(e.args) ++ ++ + # Add node specified to corosync.conf and reload corosync.conf (if running) + def addNodeToCorosync(node): + # Before adding, make sure node isn't already in corosync.conf +@@ -600,6 +617,9 @@ def addNodeToCorosync(node): + new_node.add_attribute("ring1_addr", node1) + new_node.add_attribute("nodeid", new_nodeid) + ++ # enable ATB if it's needed ++ _enable_auto_tie_breaker_for_sbd(corosync_conf) ++ + corosync_conf = autoset_2node_corosync(corosync_conf) + setCorosyncConf(str(corosync_conf)) + return True +@@ -667,6 +687,9 @@ def removeNodeFromCorosync(node): + removed_node = True + + if removed_node: ++ # enable ATB if it's needed ++ _enable_auto_tie_breaker_for_sbd(corosync_conf) ++ + corosync_conf = autoset_2node_corosync(corosync_conf) + setCorosyncConf(str(corosync_conf)) + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index d46cd62..137bb3d 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1995,14 +1995,14 @@ def enable_service(service) + end + + def disable_service(service) ++ # fails when the service is not installed, so we need to check it beforehand ++ if not is_service_installed?(service) ++ return true ++ end ++ + if ISSYSTEMCTL +- # returns success even if the service is not installed + cmd = ['systemctl', 'disable', "#{service}.service"] + else +- if not is_service_installed?(service) +- return true +- end +- # fails when the service is not installed, so we need to check it beforehand + cmd = ['chkconfig', service, 'off'] + end + _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd) +-- +1.8.3.1 + diff --git a/SOURCES/bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch 
b/SOURCES/bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch new file mode 100644 index 0000000..41f8110 --- /dev/null +++ b/SOURCES/bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch @@ -0,0 +1,354 @@ +From 66b5e393aebd84b08047f33d09bc4cbce730e205 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Tue, 23 Aug 2016 11:19:20 +0200 +Subject: [PATCH] sbd: fix check if ATB is required when enabling sbd + +--- + pcs/common/report_codes.py | 1 + + pcs/lib/commands/sbd.py | 3 +- + pcs/lib/reports.py | 12 +++ + pcs/lib/sbd.py | 39 ++++++++- + pcs/test/test_lib_sbd.py | 193 +++++++-------------------------------------- + 5 files changed, 80 insertions(+), 168 deletions(-) + +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index 5e46a1f..e6a86ec 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -155,6 +155,7 @@ SBD_DISABLING_STARTED = "SBD_DISABLING_STARTED" + SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED" + SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED" + SBD_NOT_ENABLED = "SBD_NOT_ENABLED" ++SBD_REQUIRES_ATB = "SBD_REQUIRES_ATB" + SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR" + SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED" + SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS" +diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py +index 265ebb5..2acb104 100644 +--- a/pcs/lib/commands/sbd.py ++++ b/pcs/lib/commands/sbd.py +@@ -159,7 +159,8 @@ def enable_sbd( + + # enable ATB if needed + corosync_conf = lib_env.get_corosync_conf() +- if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), corosync_conf): ++ if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf): ++ lib_env.report_processor.process(reports.sbd_requires_atb()) + corosync_conf.set_quorum_options( + lib_env.report_processor, {"auto_tie_breaker": "1"} + ) +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index 568bb7e..a701679 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ 
-1928,3 +1928,15 @@ def quorum_cannot_disable_atb_due_to_sbd( + "unable to disable auto_tie_breaker: SBD fencing will have no effect", + forceable=forceable + ) ++ ++ ++def sbd_requires_atb(): ++ """ ++ Warning that ATB will be enabled in order to make SBD fencing effective. ++ """ ++ return ReportItem.warning( ++ report_codes.SBD_REQUIRES_ATB, ++ "auto_tie_breaker quorum option will be enabled to make SBD fencing " ++ "effective. Cluster has to be offline to be able to make this change." ++ ) ++ +diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py +index c9f013b..39de740 100644 +--- a/pcs/lib/sbd.py ++++ b/pcs/lib/sbd.py +@@ -46,6 +46,25 @@ def _run_parallel_and_raise_lib_error_on_failure(func, param_list): + raise LibraryError(*report_list) + + ++def _even_number_of_nodes_and_no_qdevice( ++ corosync_conf_facade, node_number_modifier=0 ++): ++ """ ++ Returns True whenever cluster has no quorum device configured and number of ++ nodes + node_number_modifier is even number, False otherwise. ++ ++ corosync_conf_facade -- ++ node_number_modifier -- this value will be added to current number of nodes. ++ This can be useful to test whenever is ATB needed when adding/removing ++ node. ++ """ ++ return ( ++ not corosync_conf_facade.has_quorum_device() ++ and ++ (len(corosync_conf_facade.get_nodes()) + node_number_modifier) % 2 == 0 ++ ) ++ ++ + def is_auto_tie_breaker_needed( + runner, corosync_conf_facade, node_number_modifier=0 + ): +@@ -60,15 +79,29 @@ def is_auto_tie_breaker_needed( + node. 
+ """ + return ( +- not corosync_conf_facade.has_quorum_device() +- and +- (len(corosync_conf_facade.get_nodes()) + node_number_modifier) % 2 == 0 ++ _even_number_of_nodes_and_no_qdevice( ++ corosync_conf_facade, node_number_modifier ++ ) + and + is_sbd_installed(runner) + and + is_sbd_enabled(runner) + ) + ++ ++def atb_has_to_be_enabled_pre_enable_check(corosync_conf_facade): ++ """ ++ Returns True whenever quorum option auto_tie_breaker is needed to be enabled ++ for proper working of SBD fencing. False if it is not needed. This function ++ doesn't check if sbd is installed nor enabled. ++ """ ++ return ( ++ not corosync_conf_facade.is_enabled_auto_tie_breaker() ++ and ++ _even_number_of_nodes_and_no_qdevice(corosync_conf_facade) ++ ) ++ ++ + def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0): + """ + Return True whenever quorum option auto tie breaker has to be enabled for +diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py +index fd29484..516e0bd 100644 +--- a/pcs/test/test_lib_sbd.py ++++ b/pcs/test/test_lib_sbd.py +@@ -86,195 +86,60 @@ class RunParallelAndRaiseLibErrorOnFailureTest(TestCase): + ) + + +-@mock.patch("pcs.lib.sbd.is_sbd_installed") +-@mock.patch("pcs.lib.sbd.is_sbd_enabled") +-class IsAutoTieBreakerNeededTest(TestCase): ++class EvenNumberOfNodesAndNoQdevice(TestCase): + def setUp(self): +- self.runner = "runner" + self.mock_corosync_conf = mock.MagicMock(spec_set=CorosyncConfigFacade) + + def _set_ret_vals(self, nodes, qdevice): + self.mock_corosync_conf.get_nodes.return_value = nodes + self.mock_corosync_conf.has_quorum_device.return_value = qdevice + +- def test_sbd_enabled_even_nodes_has_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2], True) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf +- )) +- +- def test_sbd_enabled_even_nodes_no_qdevice( +- 
self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2], False) +- self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf +- )) +- +- def test_sbd_not_installed_even_nodes_no_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = False +- mock_installed.return_value = False ++ def test_even_num_no_qdevice(self): + self._set_ret_vals([1, 2], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf +- )) +- +- def test_sbd_enabled_odd_nodes_has_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2, 3], True) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf +- )) +- +- def test_sbd_enabled_odd_nodes_no_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2, 3], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf ++ self.assertTrue(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf + )) + +- def test_sbd_disabled_even_nodes_has_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = False +- mock_installed.return_value = True ++ def test_even_num_qdevice(self): + self._set_ret_vals([1, 2], True) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf +- )) +- +- def test_sbd_disabled_even_nodes_no_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = False +- mock_installed.return_value = True +- self._set_ret_vals([1, 2], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf ++ 
self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf + )) + +- def test_sbd_disabled_odd_nodes_has_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = False +- mock_installed.return_value = True +- self._set_ret_vals([1, 2, 3], True) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf +- )) +- +- def test_sbd_disabled_odd_nodes_no_qdevice( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = False +- mock_installed.return_value = True ++ def test_odd_num_no_qdevice(self): + self._set_ret_vals([1, 2, 3], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf ++ self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf + )) + +- def test_sbd_enabled_odd_nodes_no_qdevice_plus_node( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2, 3], False) +- self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, 1 +- )) +- +- def test_sbd_not_installed_odd_nodes_no_qdevice_plus_node( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = False +- mock_installed.return_value = False +- self._set_ret_vals([1, 2, 3], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, 1 +- )) +- +- def test_sbd_enabled_odd_nodes_no_qdevice_minus_node( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2, 3], False) +- self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, -1 +- )) +- +- def test_sbd_enabled_odd_nodes_no_qdevice_plus_2_nodes( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True 
+- self._set_ret_vals([1, 2, 3], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, 2 +- )) +- +- def test_sbd_enabled_odd_nodes_no_qdevice_minus_2_nodes( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2, 3], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, -2 ++ def test_odd_num_qdevice(self): ++ self._set_ret_vals([1, 2, 3], True) ++ self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf + )) + +- def test_sbd_enabled_even_nodes_no_qdevice_plus_node( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True ++ def test_even_num_no_qdevice_plus_one(self): + self._set_ret_vals([1, 2], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, 1 ++ self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf, 1 + )) + +- def test_sbd_enabled_even_nodes_no_qdevice_minus_node( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2], False) +- self.assertFalse(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, -1 ++ def test_even_num_qdevice_plus_one(self): ++ self._set_ret_vals([1, 2], True) ++ self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf, 1 + )) + +- def test_sbd_enabled_even_nodes_no_qdevice_plus_2_nodes( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2], False) +- self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, 2 ++ def test_odd_num_no_qdevice_plus_one(self): ++ self._set_ret_vals([1, 2, 3], False) 
++ self.assertTrue(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf, 1 + )) + +- def test_sbd_enabled_even_nodes_no_qdevice_minus_2_nodes( +- self, mock_enabled, mock_installed +- ): +- mock_enabled.return_value = True +- mock_installed.return_value = True +- self._set_ret_vals([1, 2, 3, 4], False) +- self.assertTrue(lib_sbd.is_auto_tie_breaker_needed( +- self.runner, self.mock_corosync_conf, -2 ++ def test_odd_num_qdevice_plus_one(self): ++ self._set_ret_vals([1, 2, 3], True) ++ self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice( ++ self.mock_corosync_conf, 1 + )) + + +-- +1.8.3.1 + diff --git a/SOURCES/bz1188361-01-Make-port-parameter-of-fence-agents-optional.patch b/SOURCES/bz1188361-01-Make-port-parameter-of-fence-agents-optional.patch deleted file mode 100644 index c84b220..0000000 --- a/SOURCES/bz1188361-01-Make-port-parameter-of-fence-agents-optional.patch +++ /dev/null @@ -1,42 +0,0 @@ -From cc373126c53282ff2f266e9f243c5798fca98d37 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Thu, 10 Sep 2015 17:19:00 +0200 -Subject: [PATCH] Make "port" parameter of fence agents optional - ---- - pcs/test/test_stonith.py | 12 ++++++++---- - pcs/utils.py | 18 ++++++++++-------- - 2 files changed, 18 insertions(+), 12 deletions(-) - -diff --git a/pcs/utils.py b/pcs/utils.py -index 757c159..0b8d03f 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -1791,14 +1791,16 @@ def validInstanceAttributes(res_id, ra_values, resource_type): - - if missing_required_parameters: - if resClass == "stonith" and "port" in missing_required_parameters: -- if ( -- "pcmk_host_argument" in ra_values -- or -- "pcmk_host_map" in ra_values -- or -- "pcmk_host_list" in ra_values -- ): -- missing_required_parameters.remove("port") -+ # Temporarily make "port" an optional parameter. Once we are -+ # getting metadata from pacemaker, this will be reviewed and fixed. 
-+ #if ( -+ # "pcmk_host_argument" in ra_values -+ # or -+ # "pcmk_host_map" in ra_values -+ # or -+ # "pcmk_host_list" in ra_values -+ #): -+ missing_required_parameters.remove("port") - - return bad_parameters, missing_required_parameters - --- -1.9.1 - diff --git a/SOURCES/bz1189857-01-fix-Add-Resource-form-in-web-UI.patch b/SOURCES/bz1189857-01-fix-Add-Resource-form-in-web-UI.patch deleted file mode 100644 index 4d2b674..0000000 --- a/SOURCES/bz1189857-01-fix-Add-Resource-form-in-web-UI.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 7572aa0033e2d027685807e0f7d49be86e36ee50 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Tue, 11 Aug 2015 16:08:50 +0200 -Subject: [PATCH] fix 'Add Resource' form in web UI - ---- - pcsd/views/resourceagentform.erb | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/pcsd/views/resourceagentform.erb b/pcsd/views/resourceagentform.erb -index bccc689..039023e 100644 ---- a/pcsd/views/resourceagentform.erb -+++ b/pcsd/views/resourceagentform.erb -@@ -68,7 +68,7 @@ nodes in Slave mode.") %>'> - <input type="hidden" name="resource_id" value="<%=@cur_resource.id%>"> - <% end %> - -- <input type="hidden" name="resource_type" value="<%=@resource.type%>"> -+ <input type="hidden" name="resource_type" value="<%=@resource.name%>"> - <% @resource.required_options.each { |name, desc| %> - <tr title="<%=h(desc[1])%>"> - <td class="reg"> --- -1.9.1 - diff --git a/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch b/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch deleted file mode 100644 index 7e34f88..0000000 --- a/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch +++ /dev/null @@ -1,735 +0,0 @@ -From 85ea8bf4630bd3760ab935c24c7b78cdd255f55b Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Wed, 26 Aug 2015 10:55:57 +0200 -Subject: [PATCH] fix tree view of resources in web UI - ---- - pcsd/cluster_entity.rb | 15 +- - pcsd/pcs.rb | 30 ++- - 
pcsd/public/js/nodes-ember.js | 34 +++- - pcsd/remote.rb | 12 +- - pcsd/views/nodes.erb | 457 +++++++++++++++++++++--------------------- - 5 files changed, 284 insertions(+), 264 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 182969f..b291937 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -895,7 +895,7 @@ module ClusterEntity - class Node < JSONable - attr_accessor :id, :error_list, :warning_list, :status, :quorum, :uptime, - :name, :corosync, :pacemaker, :cman, :corosync_enabled, -- :pacemaker_enabled, :pcsd_enabled, :attr, :fence_levels -+ :pacemaker_enabled, :pcsd_enabled - - def initialize - @id = nil -@@ -911,8 +911,6 @@ module ClusterEntity - @corosync_enabled = false - @pacemaker_enabled = false - @pcsd_enabled = false -- @attr = ClusterEntity::NvSet.new -- @fence_levels = {} - end - - def self.load_current_node(session, crm_dom=nil) -@@ -923,7 +921,6 @@ module ClusterEntity - node.pacemaker_enabled = pacemaker_enabled? - node.cman = cman_running? - node.pcsd_enabled = pcsd_enabled? -- node.fence_levels = get_fence_levels(session) - - node_online = (node.corosync and node.pacemaker) - node.status = node_online ? 
'online' : 'offline' -@@ -939,16 +936,6 @@ module ClusterEntity - node.status = 'online' - end - node.quorum = !!crm_dom.elements['//current_dc[@with_quorum="true"]'] -- -- node_name = get_current_node_name() -- all_nodes_attr = get_node_attributes(session) -- if all_nodes_attr[node_name] -- all_nodes_attr[node_name].each { |pair| -- node.attr << ClusterEntity::NvPair.new( -- nil, pair[:key], pair[:value] -- ) -- } -- end - else - node.status = 'offline' - end -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 37f6b83..1fe9b99 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -1624,8 +1624,11 @@ def get_node_status(session, cib_dom) - :need_ring1_address => need_ring1_address?, - :is_cman_with_udpu_transport => is_cman_with_udpu_transport?, - :acls => get_acls(session), -- :username => session[:username] -+ :username => session[:username], -+ :fence_levels => get_fence_levels(session), -+ :node_attr => node_attrs_to_v2(get_node_attributes(session)) - } -+ - nodes = get_nodes_status() - - known_nodes = [] -@@ -1742,14 +1745,31 @@ def get_cib_dom(session) - return nil - end - -+def node_attrs_to_v2(node_attrs) -+ all_nodes_attr = {} -+ node_attrs.each { |node, attrs| -+ all_nodes_attr[node] = [] -+ attrs.each { |attr| -+ all_nodes_attr[node] << { -+ :id => nil, -+ :name => attr[:key], -+ :value => attr[:value] -+ } -+ } -+ } -+ return all_nodes_attr -+end -+ - def status_v1_to_v2(status) - new_status = status.select { |k,_| - [:cluster_name, :username, :is_cman_with_udpu_transport, - :need_ring1_address, :cluster_settings, :constraints, :groups, - :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby, -- :pacemaker_offline, :acls -+ :pacemaker_offline, :acls, :fence_levels - ].include?(k) - } -+ new_status[:node_attr] = node_attrs_to_v2(status[:node_attr]) -+ - resources = ClusterEntity::make_resources_tree( - ClusterEntity::get_primitives_from_status_v1(status[:resources]) - ) -@@ -1764,15 +1784,9 @@ def status_v1_to_v2(status) - ].include?(k) - } - 
-- node_attr = ClusterEntity::NvSet.new -- status[:node_attr].each { |k,v| -- node_attr << ClusterEntity::NvPair.new(nil, k, v) -- } - new_status[:node].update( - { - :id => status[:node_id], -- :attr => node_attr.to_status, -- :fence_levels => status[:fence_levels], - :quorum => nil, - :warning_list => [], - :error_list => [], -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 46e34fa..1f60adc 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -170,7 +170,7 @@ Pcs = Ember.Application.createWithMixins({ - tree_view_onclick(self.get('cur_resource').get('id'), true); - if (!fence_change && self.get('cur_fence')) - tree_view_select(self.get('cur_fence').get('id')); -- if (!resource_change && self.get('cur_fence')) -+ if (!resource_change && self.get('cur_resource')) - tree_view_select(self.get('cur_resource').get('id')); - Pcs.selectedNodeController.reset(); - setup_node_links(); -@@ -932,6 +932,9 @@ Pcs.Setting = Ember.Object.extend({ - Pcs.Clusternode = Ember.Object.extend({ - name: null, - status: null, -+ status_unknown: function() { -+ return this.get('status') == "unknown"; -+ }.property("status"), - status_val: function() { - if (this.warnings && this.warnings.length) - return get_status_value("warning"); -@@ -1013,6 +1016,10 @@ Pcs.Clusternode = Ember.Object.extend({ - return "color:red"; - } - }.property("up","pacemaker_standby"), -+ pacemaker_standby: null, -+ corosync_enabled: null, -+ pacemaker_enabled: null, -+ pcsd_enabled: null, - standby_style: function () { - if (this.pacemaker_standby) - return "display: none;"; -@@ -1043,7 +1050,12 @@ Pcs.Clusternode = Ember.Object.extend({ - else - return "Disabled"; - }.property("pcsd_enabled"), -- location_constraints: null -+ location_constraints: null, -+ node_attrs: [], -+ fence_levels: [], -+ pcsd: null, -+ corosync_daemon: null, -+ pacemaker_daemon: null, - }); - - Pcs.Aclrole = Ember.Object.extend({ -@@ -1509,8 +1521,8 @@ Pcs.nodesController 
= Ember.ArrayController.createWithMixins({ - cur_node: null, - cur_node_attr: function () { - var nc = this; -- if (nc.cur_node && "node_attrs" in nc.cur_node) { -- return nc.cur_node.node_attrs; -+ if (nc.get('cur_node')) { -+ return nc.get('cur_node').get('node_attrs'); - } - return []; - }.property("cur_node", "content.@each.node_attrs"), -@@ -1599,7 +1611,7 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - pacemaker_standby = false; - } - -- if (node_obj["noresponse"] == true) { -+ if (node_obj["status"] == 'unknown') { - pcsd_daemon = false - } else { - pcsd_daemon = true -@@ -1618,9 +1630,9 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - up_status = false; - } - -- var node_attr = {}; -- if (node_obj["attr"]) { -- node_attr = node_obj["attr"]; -+ var node_attr = []; -+ if (data["node_attr"] && data["node_attr"][node_id]) { -+ node_attr = data["node_attr"][node_id]; - } - - found = false; -@@ -1646,7 +1658,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - node.set("uptime", node_obj["uptime"]); - node.set("node_id", node_obj["id"]); - node.set("node_attrs", node_attr); -- node.set("fence_levels", node_obj["fence_levels"]); -+ node.set("fence_levels", data["fence_levels"]); -+ node.set("status", node_obj["status"]); - } - }); - -@@ -1670,7 +1683,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - uptime: node_obj["uptime"], - node_id: node_obj["id"], - node_attrs: node_attr, -- fence_levels: node_obj["fence_levels"] -+ fence_levels: data["fence_levels"], -+ status: node_obj["status"] - }); - } - var pathname = window.location.pathname.split('/'); -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 22af38a..a40c1c7 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -1014,8 +1014,14 @@ def node_status(params, request, session) - status[:cluster_settings] - - node_attr = {} -- node.attr.each { |v| -- node_attr[v.name.to_sym] = v.value -+ status[:node_attr].each { |node, attrs| -+ 
node_attr[node] = [] -+ attrs.each { |attr| -+ node_attr[node] << { -+ :key => attr[:name], -+ :value => attr[:value] -+ } -+ } - } - - old_status = { -@@ -1038,7 +1044,7 @@ def node_status(params, request, session) - :cluster_settings => cluster_settings, - :node_id => node.id, - :node_attr => node_attr, -- :fence_levels => node.fence_levels, -+ :fence_levels => status[:fence_levels], - :need_ring1_address => status[:need_ring1_address], - :is_cman_with_udpu_transport => status[:is_cman_with_udpu_transport], - :acls => status[:acls], -diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb -index b8ecf6d..19bba62 100644 ---- a/pcsd/views/nodes.erb -+++ b/pcsd/views/nodes.erb -@@ -40,242 +40,241 @@ - </table> - </td> - <td id="node_info" colspan=2> -- <div id="node_info_div" style="opacity: 0;"> -- <div id="test"> -- <div id="node_info_header"> -- <div id="node_info_header_title" class="node_info_header_title">Edit Node </div> -- <div id="node_info_header_title_name"> --{{Pcs.nodesController.cur_node.name}} -- </div> -- -- </div> -- -- <div id="node_sub_info"> -- <table> -- <tr> -- <td rowspan=2> -- <input disabled style="margin-right: 50px;" type="text" {{bind-attr value="Pcs.nodesController.cur_node.name"}} size="35" class="text_field"> -- </td> -- <td><div style="margin-right: 8px;" class="check sprites"></div></td> -- <td> -- <!-- <tr><td>pacemaker</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div><div id="pacemaker_status" style="float:left" class="status"></div></td></tr> --> -- {{#if Pcs.nodesController.cur_node.pacemaker}} -- <div id="pacemaker_online_status" class="status"> -- Pacemaker Connected -- {{else}} -- {{#if Pcs.nodesController.cur_node.pacemaker_standby}} -- <div id="pacemaker_online_status" class="status-standby"> -- Pacemaker Standby -- {{else}} -- <div id="pacemaker_online_status" class="status-offline"> -- Pacemaker Not Connected -- {{/if}} -- {{/if}} -- </div> -- </td> -- </tr> -- <tr> -- <td><div 
style="margin-right: 8px;" class="check sprites"></div></td> -- <td> -- {{#if Pcs.nodesController.cur_node.corosync}} -- <div id="corosync_online_status" class="status"> -- Corosync Connected -- {{else}} -- <div id="corosync_online_status" class="status-offline"> -- Corosync Not Connected -- {{/if}} -- </div> -- </td> -- </tr> -- </table> -- </div> -- -- <div id="node_options_buttons"> -- <div id="node_start" class="link"> -- <div class="restart sprites" style="float: left"></div> -- Start -- </div> -- <div id="node_stop" class="link"> -- <div class="cancel sprites" style="float: left"></div> -- Stop -- </div> -- <div id="node_restart" class="link"> -- <div class="restart sprites" style="float: left"></div> -- Restart -- </div> -- <div id="node_unstandby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.unstandby_style"}}> -- <div class="unstandby sprites" style="float: left"></div> -- Unstandby -- </div> -- <div id="node_standby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.standby_style"}}> -- <div class="standby sprites" style="float: left"></div> -- Standby -- </div> -- <div class="configure sprites" style="float: left"></div> -- <div class="link"><a href="#/fencedevices" onclick="select_menu('FENCE DEVICES');return true;">Configure Fencing</a></div> -- </div> -- -- <div id="node_details"> -- <table><tr> -- <td><div class="reg">Node ID:</div></td> -- <td><div class="bold">{{Pcs.nodesController.cur_node.node_id}}</div></td> -- <td><div class="reg"> Uptime:</div></td> -- <td><div class="bold" id="uptime">{{Pcs.nodesController.cur_node.uptime}}</div></td> -- </tr> -- </table> -- </div> -+ <div id="node_info_div" style="opacity: 0;"> -+ <div id="test"> -+ <div id="node_info_header"> -+ <div id="node_info_header_title" class="node_info_header_title">Edit Node </div> -+ <div id="node_info_header_title_name"> -+ {{Pcs.nodesController.cur_node.name}} -+ </div> -+ </div> - -- <table style="float:left;margin-top:25px"> -- <tr><td 
class="datatable_header">Cluster Daemons</td></tr> -- <tr><td> -- <div id="clusterdaemons"> -- <table class="datatable"> -- <tr><th>NAME</th><th>STATUS</th></tr> -- <tr><td>pacemaker</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div> --{{#if Pcs.nodesController.cur_node.pacemaker_daemon}} --<span id="pacemaker_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span> --{{else}} --{{#if Pcs.nodesController.cur_node.pcsd}} --<span id="pacemaker_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span> --{{else}} --<span id="pacemaker_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span> --{{/if}} --{{/if}} --</td></tr> -- <tr><td>corosync</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div> --{{#if Pcs.nodesController.cur_node.corosync_daemon}} --<span id="corosync_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.corosync_startup}})</span> --{{else}} --{{#if Pcs.nodesController.cur_node.pcsd}} --<span id="corosync_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.corosync_startup}})</span> --{{else}} --<span id="corosync_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.corosync_startup}})</span> --{{/if}} --{{/if}} --</td></tr> -- <tr><td>pcsd</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div> --{{#if Pcs.nodesController.cur_node.pcsd}} --<span id="pcsd_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pcsd_startup}})</span> --{{else}} -- {{#if Pcs.nodesController.cur_node.authorized}} -- <span id="pcsd_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pcsd_startup}})</span> -- {{else}} -- <span id="pcsd_status" style="float:left" 
class="status-offline">Running (not Authorized) ({{Pcs.nodesController.cur_node.pcsd_startup}})</span> -- {{/if}} --{{/if}} --</td></tr> -- </table> -- </div> -- </td> -- </tr> -- </table> -- <table style="clear:left;float:left;margin-top:25px;"> -- <tr><td class="datatable_header">Running Resources</td></tr> -- <tr><td> -- <div id="resources_running"> -- <table class="datatable"> -- <tr><th>NAME</th></tr> -- {{#if Pcs.nodesController.cur_node.running_resources}} -- {{#each res in Pcs.nodesController.cur_node.running_resources}} -- <tr><td> -- {{#unless res.stonith}} -- {{#link-to 'Resources.index' res}}{{res.name}} ({{res.res_type}}){{/link-to}} -- {{/unless}} -- </td></tr> -- {{/each}} -- {{else}} -- <tr><td style="color: gray;">NONE</td></tr> -- {{/if}} -- </table> -- </div> -- </td> -- </tr> -- </table> -- <table style="clear:left;float:left;margin-top:25px;"> -- <tr><td class="datatable_header">Resource Location Preferences</td></tr> -- <tr><td> -- <div id="locationdep"> -- <table class="datatable"> -- <tr><th>NAME</th><th>Score</th></tr> -- {{#if Pcs.nodesController.cur_node.location_constraints}} -- {{#each Pcs.nodesController.cur_node.location_constraints}} -- <tr><td>{{rsc}}</td><td>{{score}}</td><td></td></tr> -- {{/each}} -- {{else}} -- <tr><td style="color: gray;">NONE</td><td></td></tr> -- {{/if}} -- </table> -- </div> -- </td> -- </tr> -- </table> -- <table style="clear:left;float:left;margin-top:25px;"> -- <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> -- <tr><td> -- <div id="node_attributes"> -- <table class="datatable"> -- <tr><th>Attribute</th><th>Value</th><th>Remove</th></tr> -- {{#each Pcs.nodesController.cur_node_attr}} -- <tr><td>{{this.name}}</td> -- 
<td>{{this.value}}</td> -- <td {{bind-attr node_attr_key="this.name"}} {{bind-attr node_attr_parent="this.parent"}} style="text-align:center"> -- <a onclick="remove_node_attr($(this).parent());return false;" href="#" class="remove">X</a> -+ <div id="node_sub_info"> -+ <table> -+ <tr> -+ <td rowspan=2> -+ <input disabled style="margin-right: 50px;" type="text" {{bind-attr value="Pcs.nodesController.cur_node.name"}} size="35" class="text_field"> -+ </td> -+ <td><div style="margin-right: 8px;" class="check sprites"></div></td> -+ <td> -+ {{#if Pcs.nodesController.cur_node.pacemaker}} -+ <div id="pacemaker_online_status" class="status"> -+ Pacemaker Connected -+ </div> -+ {{else}} -+ {{#if Pcs.nodesController.cur_node.pacemaker_standby}} -+ <div id="pacemaker_online_status" class="status-standby"> -+ Pacemaker Standby -+ </div> -+ {{else}} -+ <div id="pacemaker_online_status" class="status-offline"> -+ Pacemaker Not Connected -+ </div> -+ {{/if}} -+ {{/if}} - </td> - </tr> -- {{/each}} -- {{#unless Pcs.nodesController.cur_node_attr}} -- <tr><td style="color: gray;">NONE</td><td></td><td></td></tr> -- {{/unless}} -- <tr id="new_node_attr_col"> -- <td><input type="text" name="new_node_attr_key" size="20"></td> -- <td><input type="text" name="new_node_attr_value" size="20"></td> -- <td><button type="button" onclick="add_node_attr('#new_node_attr_col');" name="add">Add</button></td> -+ <tr> -+ <td><div style="margin-right: 8px;" class="check sprites"></div></td> -+ <td> -+ {{#if Pcs.nodesController.cur_node.corosync}} -+ <div id="corosync_online_status" class="status"> -+ Corosync Connected -+ </div> -+ {{else}} -+ <div id="corosync_online_status" class="status-offline"> -+ Corosync Not Connected -+ </div> -+ {{/if}} -+ </td> - </tr> -+ </table> -+ </div> -+ -+ <div id="node_options_buttons"> -+ <div id="node_start" class="link"> -+ <div class="restart sprites" style="float: left"></div> -+ Start -+ </div> -+ <div id="node_stop" class="link"> -+ <div class="cancel sprites" 
style="float: left"></div> -+ Stop -+ </div> -+ <div id="node_restart" class="link"> -+ <div class="restart sprites" style="float: left"></div> -+ Restart -+ </div> -+ <div id="node_unstandby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.unstandby_style"}}> -+ <div class="unstandby sprites" style="float: left"></div> -+ Unstandby -+ </div> -+ <div id="node_standby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.standby_style"}}> -+ <div class="standby sprites" style="float: left"></div> -+ Standby -+ </div> -+ <div class="configure sprites" style="float: left"></div> -+ <div class="link"><a href="#/fencedevices" onclick="select_menu('FENCE DEVICES');return true;">Configure Fencing</a></div> -+ </div> - -+ <div id="node_details"> -+ {{#unless Pcs.nodesController.cur_node.status_unknown}} -+ <table><tr> -+ <td><div class="reg">Node ID:</div></td> -+ <td><div class="bold">{{Pcs.nodesController.cur_node.node_id}}</div></td> -+ <td><div class="reg"> Uptime:</div></td> -+ <td><div class="bold" id="uptime">{{Pcs.nodesController.cur_node.uptime}}</div></td> -+ </tr> - </table> -+ {{/unless}} - </div> -- </td> -- </tr> -- </table> -- <table style="clear:left;float:left;margin-top:25px;"> -- <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> -- <tr><td> -- <div id="fencelevels"> -- <table class="datatable"> -- <tr><th>Level</th><th>Fence Devices</th><th>Remove</th></tr> -- {{#each Pcs.nodesController.cur_node_fence_levels}} -- <tr> -- <td>{{this.level}}</td> -- <td>{{this.devices}}</td> -- <td {{bind-attr fence_level="this.level" fence_devices="this.devices"}} style="text-align:center"> -- <a onclick="add_remove_fence_level($(this).parent(),true);return 
false;" href="#" class="remove">X</a> -- </td> -- </tr> -- {{/each}} -- {{#unless Pcs.nodesController.cur_node_fence_levels}} -- <tr><td style="color: gray;">NONE</td><td></td><td></td></tr> -- {{/unless}} -- <tr id="new_fence_level_col"> -- <td><input type="text" name="new_level_level" size="2"></td> -- <td><select name="new_level_value"> -- <option></option> -- {{#each Pcs.resourcesContainer.fence_list}} -- <option {{bind-attr value="this.id"}}>{{this.id}}</option> -- {{/each }} -- </select></td> -- <td><button type="button" onclick="add_remove_fence_level($(this).parent());" name="add">Add</button></td> -- </tr> -- </table> -- </div> -- </td> -- </tr> -- </table> -- </td> -- </tr> -- </div> -- </div> -- </td> --</tr> -+ -+ <table style="float:left;margin-top:25px"> -+ <tr><td class="datatable_header">Cluster Daemons</td></tr> -+ <tr><td> -+ <div id="clusterdaemons"> -+ <table class="datatable"> -+ <tr><th>NAME</th><th>STATUS</th></tr> -+ <tr><td>pacemaker</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div> -+ {{#if Pcs.nodesController.cur_node.pacemaker_daemon}} -+ <span id="pacemaker_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span> -+ {{else}} -+ {{#if Pcs.nodesController.cur_node.pcsd}} -+ <span id="pacemaker_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span> -+ {{else}} -+ <span id="pacemaker_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span> -+ {{/if}} -+ {{/if}} -+ </td></tr> -+ <tr><td>corosync</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div> -+ {{#if Pcs.nodesController.cur_node.corosync_daemon}} -+ <span id="corosync_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.corosync_startup}})</span> -+ {{else}} -+ {{#if Pcs.nodesController.cur_node.pcsd}} -+ <span 
id="corosync_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.corosync_startup}})</span> -+ {{else}} -+ <span id="corosync_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.corosync_startup}})</span> -+ {{/if}} -+ {{/if}} -+ </td></tr> -+ <tr><td>pcsd</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div> -+ {{#if Pcs.nodesController.cur_node.pcsd}} -+ <span id="pcsd_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pcsd_startup}})</span> -+ {{else}} -+ {{#if Pcs.nodesController.cur_node.authorized}} -+ <span id="pcsd_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pcsd_startup}})</span> -+ {{else}} -+ <span id="pcsd_status" style="float:left" class="status-offline">Running (not Authorized) ({{Pcs.nodesController.cur_node.pcsd_startup}})</span> -+ {{/if}} -+ {{/if}} -+ </td></tr> -+ </table> -+ </div> -+ </td> -+ </tr> -+ </table> -+ <table style="clear:left;float:left;margin-top:25px;"> -+ <tr><td class="datatable_header">Running Resources</td></tr> -+ <tr><td> -+ <div id="resources_running"> -+ <table class="datatable"> -+ <tr><th>NAME</th></tr> -+ {{#if Pcs.nodesController.cur_node.running_resources}} -+ {{#each res in Pcs.nodesController.cur_node.running_resources}} -+ <tr><td> -+ {{#unless res.stonith}} -+ {{#link-to 'Resources.index' res}}{{res.name}} ({{res.res_type}}){{/link-to}} -+ {{/unless}} -+ </td></tr> -+ {{/each}} -+ {{else}} -+ <tr><td style="color: gray;">NONE</td></tr> -+ {{/if}} -+ </table> -+ </div> -+ </td> -+ </tr> -+ </table> -+ <table style="clear:left;float:left;margin-top:25px;"> -+ <tr><td class="datatable_header">Resource Location Preferences</td></tr> -+ <tr><td> -+ <div id="locationdep"> -+ <table class="datatable"> -+ <tr><th>NAME</th><th>Score</th></tr> -+ {{#if Pcs.nodesController.cur_node.location_constraints}} -+ {{#each 
Pcs.nodesController.cur_node.location_constraints}} -+ <tr><td>{{rsc}}</td><td>{{score}}</td><td></td></tr> -+ {{/each}} -+ {{else}} -+ <tr><td style="color: gray;">NONE</td><td></td></tr> -+ {{/if}} -+ </table> -+ </div> -+ </td> -+ </tr> -+ </table> -+ <table style="clear:left;float:left;margin-top:25px;"> -+ <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> -+ <tr><td> -+ <div id="node_attributes"> -+ <table class="datatable"> -+ <tr><th>Attribute</th><th>Value</th><th>Remove</th></tr> -+ {{#each attr in Pcs.nodesController.cur_node_attr}} -+ <tr><td>{{attr.name}}</td> -+ <td>{{attr.value}}</td> -+ <td {{bind-attr node_attr_key=attr.name}} {{bind-attr node_attr_parent=attr.parent}} style="text-align:center"> -+ <a onclick="remove_node_attr($(this).parent());return false;" href="#" class="remove">X</a> -+ </td> -+ </tr> -+ {{else}} -+ <tr><td style="color: gray;">NONE</td><td></td><td></td></tr> -+ {{/each}} -+ <tr id="new_node_attr_col"> -+ <td><input type="text" name="new_node_attr_key" size="20"></td> -+ <td><input type="text" name="new_node_attr_value" size="20"></td> -+ <td><button type="button" onclick="add_node_attr('#new_node_attr_col');" name="add">Add</button></td> -+ </tr> -+ </table> -+ </div> -+ </td> -+ </tr> -+ </table> -+ <table style="clear:left;float:left;margin-top:25px;"> -+ <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> -+ <tr><td> -+ <div id="fencelevels"> -+ <table 
class="datatable"> -+ <tr><th>Level</th><th>Fence Devices</th><th>Remove</th></tr> -+ {{#each Pcs.nodesController.cur_node_fence_levels}} -+ <tr> -+ <td>{{this.level}}</td> -+ <td>{{this.devices}}</td> -+ <td {{bind-attr fence_level="this.level" fence_devices="this.devices"}} style="text-align:center"> -+ <a onclick="add_remove_fence_level($(this).parent(),true);return false;" href="#" class="remove">X</a> -+ </td> -+ </tr> -+ {{/each}} -+ {{#unless Pcs.nodesController.cur_node_fence_levels}} -+ <tr><td style="color: gray;">NONE</td><td></td><td></td></tr> -+ {{/unless}} -+ <tr id="new_fence_level_col"> -+ <td><input type="text" name="new_level_level" size="2"></td> -+ <td><select name="new_level_value"> -+ <option></option> -+ {{#each Pcs.resourcesContainer.fence_list}} -+ <option {{bind-attr value="this.id"}}>{{this.id}}</option> -+ {{/each }} -+ </select></td> -+ <td><button type="button" onclick="add_remove_fence_level($(this).parent());" name="add">Add</button></td> -+ </tr> -+ </table> -+ </div> -+ </td> -+ </tr> -+ </table> -+ </div> -+ </div> -+ </td> -+ </tr> - <%= erb :_configure %> - <%= erb :_acls %> - <%= erb :_wizards %> --- -1.9.1 - diff --git a/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch b/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch deleted file mode 100644 index 9398777..0000000 --- a/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch +++ /dev/null @@ -1,173 +0,0 @@ -From 032a2571656c646f17bb3453b6a7d4883241ad46 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Tue, 1 Sep 2015 12:06:20 +0200 -Subject: [PATCH] web UI: prevents running update multiple times at once - ---- - pcsd/public/js/nodes-ember.js | 106 ++++++++++++++++++++++++++++++++++++------ - 1 file changed, 91 insertions(+), 15 deletions(-) - -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 172c00a..d2f85bd 100644 ---- 
a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -49,22 +49,25 @@ Pcs = Ember.Application.createWithMixins({ - }); - return retArray; - }, -- update_timeout: null, -- update: function(first_run) { -+ updater: null, -+ -+ update: function() { -+ Pcs.get('updater').update(); -+ }, -+ -+ _update: function(first_run) { - if (window.location.pathname.lastIndexOf('/manage', 0) !== 0) { - return; - } -- clearTimeout(Pcs.get('update_timeout')); -- Pcs.set('update_timeout', null); -+ if (first_run) { -+ show_loading_screen(); -+ } - var self = Pcs; - var cluster_name = self.cluster_name; - if (cluster_name == null) { - if (location.pathname.indexOf("/manage") != 0) { - return; - } -- if (first_run) { -- show_loading_screen(); -- } - Ember.debug("Empty Cluster Name"); - $.ajax({ - url: "/clusters_overview", -@@ -77,8 +80,6 @@ Pcs = Ember.Application.createWithMixins({ - }); - if (data["not_current_data"]) { - self.update(); -- } else { -- Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - } - hide_loading_screen(); - }, -@@ -93,15 +94,14 @@ Pcs = Ember.Application.createWithMixins({ - console.log("Error: Unable to parse json for clusters_overview"); - } - } -- Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - hide_loading_screen(); -+ }, -+ complete: function() { -+ Pcs.get('updater').update_finished(); - } - }); - return; - } -- if (first_run) { -- show_loading_screen(); -- } - $.ajax({ - url: "cluster_status", - dataType: "json", -@@ -191,12 +191,84 @@ Pcs = Ember.Application.createWithMixins({ - }, - complete: function() { - hide_loading_screen(); -- Pcs.update_timeout = window.setTimeout(Pcs.update,20000); -+ Pcs.get('updater').update_finished(); - } - }); - } - }); - -+Pcs.Updater = Ember.Object.extend({ -+ timeout: 20000, -+ first_run: true, -+ async: true, -+ autostart: true, -+ started: false, -+ in_progress: false, -+ waiting: false, -+ update_function: null, -+ update_target: null, -+ timer: null, -+ -+ 
start: function() { -+ this.set('started', true); -+ this.update(); -+ }, -+ -+ stop: function() { -+ this.set('started', false); -+ this.cancel_timer(); -+ }, -+ -+ cancel_timer: function() { -+ var self = this; -+ var timer = self.get('timer'); -+ if (timer) { -+ self.set('timer', null); -+ Ember.run.cancel(timer); -+ } -+ }, -+ -+ update: function() { -+ var self = this; -+ if (!self.get('update_function')) { -+ console.log('No update_function defined!'); -+ return; -+ } -+ self.cancel_timer(); -+ self.set('waiting', false); -+ if (self.get('in_progress')) { -+ self.set('waiting', true); -+ } else { -+ self.set('in_progress', true); -+ self.get('update_function').apply(self.get('update_target'), [self.get('first_run')]); -+ self.set('first_run', false); -+ if (!self.get('async')) { -+ self.update_finished(); -+ } -+ } -+ }, -+ -+ update_finished: function() { -+ var self = this; -+ if (self.get('waiting')) { -+ Ember.run.next(self, self.update); -+ } else if (self.get('started')) { -+ self.set('timer', Ember.run.later(self, self.update, self.get('timeout'))); -+ } -+ self.set('in_progress', false); -+ }, -+ -+ init: function() { -+ var self = this; -+ if (!self.get('update_target')) { -+ self.set('update_target', self); -+ } -+ if (self.get('autostart')) { -+ self.start(); -+ } -+ } -+}); -+ - Pcs.resourcesContainer = Ember.Object.create({ - resource_map: {}, - top_level_resource_map: {}, -@@ -1742,4 +1814,8 @@ function myUpdate() { - // window.setTimeout(myUpdate,4000); - } - --Pcs.update(true); -+Pcs.set('updater', Pcs.Updater.create({ -+ timeout: 20000, -+ update_function: Pcs._update, -+ update_target: Pcs -+})); --- -1.9.1 - diff --git a/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch b/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch deleted file mode 100644 index 34ec1a9..0000000 --- a/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 7e92db5789ad09f0e1184691ba69fb087402f24c Mon 
Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Wed, 2 Sep 2015 11:16:14 +0200 -Subject: [PATCH] fix constraints removing in web UI - ---- - pcsd/public/js/nodes-ember.js | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index d2f85bd..0943c65 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -562,9 +562,12 @@ Pcs.resourcesContainer = Ember.Object.create({ - self.set('constraints', constraints); - var resource_map = self.get('resource_map'); - $.each(constraints, function(const_type, cons) { -- $.each(cons, function(resource_id, cons_list) { -- if (resource_id in resource_map) -- resource_map[resource_id].set(const_type, cons_list); -+ $.each(resource_map, function(resource_id, resource_obj) { -+ if (resource_id in cons) { -+ resource_obj.set(const_type, cons[resource_id]); -+ } else { -+ resource_obj.set(const_type, []); -+ } - }); - }); - } --- -1.9.1 - diff --git a/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch b/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch deleted file mode 100644 index d702fe2..0000000 --- a/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 41e2d3e4f5ae0331d7984612485b3bbb84d41304 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Wed, 2 Sep 2015 12:39:06 +0200 -Subject: [PATCH] remove removing constriants from client-side (javascript) - -All changes are displayed after update. 
---- - pcsd/public/js/nodes-ember.js | 24 ------------------------ - pcsd/public/js/pcsd.js | 6 ------ - 2 files changed, 30 deletions(-) - -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 0943c65..5fec386 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -390,30 +390,6 @@ Pcs.resourcesContainer = Ember.Object.create({ - }; - }, - -- remove_constraint: function(constraint_id) { -- $.each(this.get('resource_map'), function(key, resource) { -- $.each( -- [ -- "location_constraints", -- "ordering_constraints", -- "ordering_set_constraints", -- "colocation_constraints" -- ], -- function(_, constraint_type) { -- if (resource.get(constraint_type)) { -- resource.set( -- constraint_type, -- $.grep( -- resource.get(constraint_type), -- function(value2, key) { return value2.id != constraint_id; } -- ) -- ); -- } -- } -- ); -- }); -- }, -- - update_meta_attr: function(resource_id, attr, value) { - value = typeof value !== 'undefined' ? 
value.trim() : ""; - var data = { -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 879b533..197cdd1 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -1595,9 +1595,6 @@ function remove_constraint(id) { - url: get_cluster_remote_url() + 'remove_constraint_remote', - data: {"constraint_id": id}, - timeout: pcs_timeout, -- success: function (data) { -- Pcs.resourcesContainer.remove_constraint(id); -- }, - error: function (xhr, status, error) { - alert( - "Error removing constraint " -@@ -1617,9 +1614,6 @@ function remove_constraint_rule(id) { - url: get_cluster_remote_url() + 'remove_constraint_rule_remote', - data: {"rule_id": id}, - timeout: pcs_timeout, -- success: function (data) { -- Pcs.resourcesContainer.remove_constraint(id); -- }, - error: function (xhr, status, error) { - alert( - "Error removing constraint rule " --- -1.9.1 - diff --git a/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch b/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch deleted file mode 100644 index eb0098a..0000000 --- a/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch +++ /dev/null @@ -1,228 +0,0 @@ -From 7c12321d187ce5919ea5e443612321b404be8cab Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Tue, 15 Sep 2015 11:03:59 +0200 -Subject: [PATCH] web UI: fixes in nodes, resources, fence devices - -- fix creating disabled resource -- add sorting for cluster list, resource list and fence device list -- hide resource (fence device) details when there is no resource (fence device) -- in resource list color of resource name depends on its status -- fix group selector -- disabled autocorrect for ordering set constraints -- fix status detection of master/slave resources ---- - pcsd/cluster_entity.rb | 2 +- - pcsd/pcsd.rb | 2 +- - pcsd/public/css/style.css | 8 +++++++ - pcsd/public/js/nodes-ember.js | 56 +++++++++++++++++++++++++++++++------------ - 
pcsd/public/js/pcsd.js | 5 +++- - pcsd/views/main.erb | 4 +++- - 6 files changed, 58 insertions(+), 19 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index b5d2719..8f29a40 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -914,7 +914,7 @@ module ClusterEntity - end - @masters, @slaves = get_masters_slaves(primitive_list) - if (@masters.empty? and -- @member.status != ClusterEntity::ResourceStatus.new(:disabled) -+ @member.status == ClusterEntity::ResourceStatus.new(:running) - ) - @status = ClusterEntity::ResourceStatus.new(:partially_running) - end -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index 9a07ee8..b7c2a49 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -178,7 +178,7 @@ helpers do - param_line << "#{myparam}=#{val}" - end - if param == "disabled" -- meta_options << "meta target-role=Stopped" -+ meta_options << 'meta' << 'target-role=Stopped' - end - } - return param_line + meta_options -diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css -index a3f6638..1c003bd 100644 ---- a/pcsd/public/css/style.css -+++ b/pcsd/public/css/style.css -@@ -778,3 +778,11 @@ li.menuheader { - .issue_table { - margin-top: 1.5em; - } -+ -+.status-error { -+ color: red; -+} -+ -+.status-warning { -+ color: #ff6600; -+} -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index bbeed55..1e00a94 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -147,8 +147,10 @@ Pcs = Ember.Application.createWithMixins({ - } else { - if (self.get('fence_list').length > 0) { - cur_fence = self.get('fence_list')[0]; -- fence_change = true; -+ } else { -+ cur_fence = null; - } -+ fence_change = true; - } - - if (cur_resource && cur_resource.get('id') in resource_map) { -@@ -158,22 +160,28 @@ Pcs = Ember.Application.createWithMixins({ - } else { - if (self.get('resource_list').length > 0) { - cur_resource = self.get('resource_list')[0]; -- resource_change = true; -+ } else { 
-+ cur_resource = null; - } -+ resource_change = true; - } - - self.set('cur_fence', cur_fence); - self.set('cur_resource', cur_resource); - - Ember.run.scheduleOnce('afterRender', Pcs, function () { -- if (fence_change) -- tree_view_onclick(self.get('cur_fence').get('id'), true); -- if (resource_change) -- tree_view_onclick(self.get('cur_resource').get('id'), true); -- if (!fence_change && self.get('cur_fence')) -- tree_view_select(self.get('cur_fence').get('id')); -- if (!resource_change && self.get('cur_resource')) -- tree_view_select(self.get('cur_resource').get('id')); -+ if (self.get('cur_fence')) { -+ if (fence_change) -+ tree_view_onclick(self.get('cur_fence').get('id'), true); -+ else -+ tree_view_select(self.get('cur_fence').get('id')); -+ } -+ if (self.get('cur_resource')) { -+ if (resource_change) -+ tree_view_onclick(self.get('cur_resource').get('id'), true); -+ else -+ tree_view_select(self.get('cur_resource').get('id')); -+ } - Pcs.selectedNodeController.reset(); - disable_checkbox_clicks(); - }); -@@ -546,6 +554,11 @@ Pcs.resourcesContainer = Ember.Object.create({ - } - }); - }); -+ $.each(resource_map, function(resource_id, resource_obj) { -+ resource_obj.set('group_list', self.get('group_list')); -+ }); -+ self.set('resource_list', Ember.copy(self.get('resource_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))})); -+ self.set('fence_list', Ember.copy(self.get('fence_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))})); - } - }); - -@@ -565,6 +578,7 @@ Pcs.ResourceObj = Ember.Object.extend({ - disabled: false, - error_list: [], - warning_list: [], -+ group_list: [], - get_group_id: function() { - var self = this; - var p = self.get('parent'); -@@ -577,7 +591,7 @@ Pcs.ResourceObj = Ember.Object.extend({ - var self = this; - var cur_group = self.get('get_group_id'); - var html = '<select>\n<option value="">None</option>\n'; -- $.each(Pcs.resourcesContainer.get('group_list'), function(_, group) { -+ 
$.each(self.get('group_list'), function(_, group) { - html += '<option value="' + group + '"'; - if (cur_group === group) { - html += 'selected'; -@@ -586,7 +600,7 @@ Pcs.ResourceObj = Ember.Object.extend({ - }); - html += '</select><input type="button" value="Change group" onclick="resource_change_group(curResource(), $(this).prev().prop(\'value\'));">'; - return html; -- }.property('Pcs.resourceContainer.group_list', 'get_group_id'), -+ }.property('group_list', 'get_group_id'), - status: "unknown", - class_type: null, // property to determine type of the resource - resource_type: function() { // this property is just for displaying resource type in GUI -@@ -641,6 +655,17 @@ Pcs.ResourceObj = Ember.Object.extend({ - } - return out; - }.property("error_list.@each", "warning_list.@each"), -+ span_class: function() { -+ switch (this.get("status_val")) { -+ case get_status_value("failed"): -+ return "status-error"; -+ case get_status_value("warning"): -+ case get_status_value("disabled"): -+ return "status-warning"; -+ default: -+ return ""; -+ } -+ }.property("status_val"), - - location_constraints: [], - ordering_constraints: [], -@@ -1265,9 +1290,9 @@ Pcs.Cluster = Ember.Object.extend({ - - Pcs.clusterController = Ember.Object.create({ - cluster_list: Ember.ArrayController.create({ -- content: Ember.A(), sortProperties: ['status', 'name'], -- sortAscending: true, -- sortFunction: function(a,b){return status_comparator(a,b);} -+ content: Ember.A(), -+ sortProperties: ['name'], -+ sortAscending: true - }), - cur_cluster: null, - show_all_nodes: false, -@@ -1779,6 +1804,7 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - $.each(nodesToRemove, function(k,v) { - self.content.removeObject(v); - }); -+ self.set('content', Ember.copy(self.get('content').sort(function(a,b){return a.get('name').localeCompare(b.get('name'))}))); - } - }); - -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 84db292..23fd316 100644 ---- 
a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -1526,7 +1526,10 @@ function add_constraint(parent_id, c_type, force) { - } - - function add_constraint_set(parent_id, c_type, force) { -- var data = {'resources': []}; -+ var data = { -+ resources: [], -+ disable_autocorrect: true -+ }; - $(parent_id + " input[name='resource_ids[]']").each(function(index, element) { - var resources = element.value.trim(); - if (resources.length > 0) { -diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb -index b24c74a..e7e611d 100644 ---- a/pcsd/views/main.erb -+++ b/pcsd/views/main.erb -@@ -105,7 +105,9 @@ - <input type="checkbox" onchange="tree_view_checkbox_onchange(this)"> - </td> - <td style="width:18px;" class="node_list_sprite">{{{node.status_icon}}}</td> -- <td class="resource_name" nowrap {{bind-attr node=node.style}}>{{node._id}}</td> -+ <td class="resource_name" nowrap {{bind-attr node=node.style}}> -+ <span {{bind-attr class=node.span_class}}>{{node._id}}</span> -+ </td> - <td style="width:200px;" class="resource_type" {{bind-attr style=node.style}}>{{node.resource_type}}</td> - <td style="width:18px;"> - <div style="display: none;" class="arrow sprites"></div> --- -1.9.1 - diff --git a/SOURCES/bz1189857-07-web-UI-fixes.patch b/SOURCES/bz1189857-07-web-UI-fixes.patch deleted file mode 100644 index 9df183e..0000000 --- a/SOURCES/bz1189857-07-web-UI-fixes.patch +++ /dev/null @@ -1,99 +0,0 @@ -From c601e0f7e93db3e136eb9080fc2d4f4a0c999360 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Mon, 21 Sep 2015 17:53:51 +0200 -Subject: [PATCH] web UI fixes - -- fix loading resource optional argument form -- fix master/slave resource status from old pcsd -- fix status of failed resource ---- - pcsd/cluster_entity.rb | 10 ++++++---- - pcsd/public/js/pcsd.js | 4 +++- - pcsd/views/main.erb | 2 ++ - 3 files changed, 11 insertions(+), 5 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 8f29a40..c746544 100644 ---- 
a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -191,6 +191,7 @@ module ClusterEntity - mi = ClusterEntity::Clone.new - else - mi = ClusterEntity::MasterSlave.new -+ mi.masters_unknown = true - end - mi.id = mi_id - mi.meta_attr = ClusterEntity::get_meta_attr_from_status_v1( -@@ -539,7 +540,7 @@ module ClusterEntity - status = ClusterEntity::ResourceStatus.new(:disabled) - elsif running > 0 - status = ClusterEntity::ResourceStatus.new(:running) -- elsif failed > 0 -+ elsif failed > 0 or @error_list.length > 0 - status = ClusterEntity::ResourceStatus.new(:failed) - else - status = ClusterEntity::ResourceStatus.new(:blocked) -@@ -854,10 +855,11 @@ module ClusterEntity - - - class MasterSlave < MultiInstance -- attr_accessor :masters, :slaves -+ attr_accessor :masters, :slaves, :masters_unknown - - def initialize(master_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, operations=nil) - super(master_cib_element, crm_dom, rsc_status, parent, operations) -+ @masters_unknown = false - @class_type = 'master' - @masters = [] - @slaves = [] -@@ -869,7 +871,7 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if (@masters.empty? and -+ if (@masters.empty? and !@masters_unknown and - @status != ClusterEntity::ResourceStatus.new(:disabled) - ) - @warning_list << { -@@ -913,7 +915,7 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if (@masters.empty? and -+ if (@masters.empty? 
and !@masters_unknown and - @member.status == ClusterEntity::ResourceStatus.new(:running) - ) - @status = ClusterEntity::ResourceStatus.new(:partially_running) -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 23fd316..04bee0f 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -2010,7 +2010,9 @@ function tree_view_onclick(resource_id, auto) { - - tree_view_select(resource_id); - -- load_agent_form(resource_id, resource_obj.get('stonith')); -+ Ember.run.next(Pcs, function() { -+ load_agent_form(resource_id, resource_obj.get('stonith')); -+ }); - } - - function tree_view_select(element_id) { -diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb -index e7e611d..b7260ad 100644 ---- a/pcsd/views/main.erb -+++ b/pcsd/views/main.erb -@@ -277,8 +277,10 @@ - {{#if stonith}} - <div style="clear:left; margin-top: 2em;" id="stonith_agent_form"></div> - {{else}} -+ {{#if resource.is_primitive}} - <div style="clear:left; margin-top: 2em;" id="resource_agent_form"></div> - {{/if}} -+ {{/if}} - {{else}} - {{#if stonith}} - NO FENCE DEVICE IN CLUSTER --- -1.9.1 - diff --git a/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch b/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch deleted file mode 100644 index 69de2d1..0000000 --- a/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 906780d7d61fef803c5e1adfa9d156e07e67c26a Mon Sep 17 00:00:00 2001 -From: Ondrej Mular <omular@redhat.com> -Date: Tue, 15 Sep 2015 11:14:04 +0200 -Subject: [PATCH] web UI: allows spaces in optional arguments when creating new - resource - ---- - pcsd/public/js/pcsd.js | 7 +++---- - 1 file changed, 3 insertions(+), 4 deletions(-) - -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index cddf14e..84db292 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -284,15 +284,14 @@ function disable_spaces(item) { - } - - 
function load_resource_form(item, ra, stonith) { -- data = { "new": true, resourcename: ra}; -+ var data = { new: true, resourcename: ra}; -+ var command; - if (!stonith) - command = "resource_metadata"; - else - command = "fence_device_metadata"; - -- item.load(get_cluster_remote_url() + command, data, function() { -- disable_spaces(this); -- }); -+ item.load(get_cluster_remote_url() + command, data); - } - - function update_resource_form_groups(form, group_list) { --- -1.9.1 - diff --git a/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch b/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch new file mode 100644 index 0000000..b3c10e4 --- /dev/null +++ b/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch @@ -0,0 +1,122 @@ +From 2a080e5986331989a3164a35129e576641b2cca5 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Tue, 19 Jul 2016 16:42:44 +0200 +Subject: [PATCH 1/2] allow to remove a dead node from a cluster + +--- + pcs/cluster.py | 41 +++++++++++++++++++++++++++-------------- + 1 file changed, 27 insertions(+), 14 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index baa0f44..7a8615d 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -1076,7 +1076,7 @@ def disable_cluster_nodes(nodes): + if len(error_list) > 0: + utils.err("unable to disable all nodes\n" + "\n".join(error_list)) + +-def destroy_cluster(argv): ++def destroy_cluster(argv, keep_going=False): + if len(argv) > 0: + # stop pacemaker and resources while cluster is still quorate + nodes = argv +@@ -1085,7 +1085,14 @@ def destroy_cluster(argv): + # destroy will stop any remaining cluster daemons + error_list = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True) + if error_list: +- utils.err("unable to destroy cluster\n" + "\n".join(error_list)) ++ if keep_going: ++ print( ++ "Warning: unable to destroy cluster\n" ++ + ++ "\n".join(error_list) ++ ) ++ else: ++ utils.err("unable to destroy 
cluster\n" + "\n".join(error_list)) + + def stop_cluster(argv): + if len(argv) > 0: +@@ -1347,19 +1354,25 @@ def cluster_node(argv): + + node = argv[1] + node0, node1 = utils.parse_multiring_node(node) +- + if not node0: + utils.err("missing ring 0 address of the node") +- status,output = utils.checkAuthorization(node0) +- if status == 2: +- utils.err("pcsd is not running on %s" % node0) +- elif status == 3: +- utils.err( +- "%s is not yet authenticated (try pcs cluster auth %s)" +- % (node0, node0) +- ) +- elif status != 0: +- utils.err(output) ++ ++ # allow to continue if removing a node with --force ++ if add_node or "--force" not in utils.pcs_options: ++ status, output = utils.checkAuthorization(node0) ++ if status != 0: ++ if status == 2: ++ msg = "pcsd is not running on {0}".format(node0) ++ elif status == 3: ++ msg = ( ++ "{node} is not yet authenticated " ++ + " (try pcs cluster auth {node})" ++ ).format(node=node0) ++ else: ++ msg = output ++ if not add_node: ++ msg += ", use --force to override" ++ utils.err(msg) + + if add_node == True: + wait = False +@@ -1540,7 +1553,7 @@ def cluster_node(argv): + + nodesRemoved = False + c_nodes = utils.getNodesFromCorosyncConf() +- destroy_cluster([node0]) ++ destroy_cluster([node0], keep_going=("--force" in utils.pcs_options)) + for my_node in c_nodes: + if my_node == node0: + continue +-- +1.8.3.1 + + +From c48716233ace08c16e7e4b66075aebeca9366321 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Wed, 20 Jul 2016 10:01:13 +0200 +Subject: [PATCH 2/2] gui: allow to remove a dead node from a cluster + +--- + pcsd/remote.rb | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 25fb74d..05a6d03 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -837,8 +837,15 @@ def remote_remove_nodes(params, request, auth_user) + stdout, stderr, retval = run_cmd( + auth_user, PCS, "cluster", "stop", *stop_params + ) +- if retval != 0 +- 
return [400, stderr.join] ++ if retval != 0 and not params['force'] ++ # If forced, keep going even if unable to stop all nodes (they may be dead). ++ # Add info this error is forceable if pcs did not do it (e.g. when unable ++ # to connect to some nodes). ++ message = stderr.join ++ if not message.include?(', use --force to override') ++ message += ', use --force to override' ++ end ++ return [400, message] + end + + node_list.each {|node| +-- +1.8.3.1 + diff --git a/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch b/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch new file mode 100644 index 0000000..6f3c99a --- /dev/null +++ b/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch @@ -0,0 +1,84 @@ +From 4fbf6a24492b0ac61be7822208275f1837165ae2 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Fri, 22 Jul 2016 13:37:28 +0200 +Subject: [PATCH] web UI: fix occasional issue with not showing optional + arguments of resources + +--- + pcsd/public/js/nodes-ember.js | 12 ++++-------- + pcsd/public/js/pcsd.js | 17 +++++++---------- + 2 files changed, 11 insertions(+), 18 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index cb62806..2b43559 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -210,20 +210,16 @@ Pcs = Ember.Application.createWithMixins({ + Ember.run.scheduleOnce('afterRender', Pcs, function () { + if (self.get('cur_fence')) { + if (fence_change) { +- if (first_run) { +- update_instance_attributes(self.get('cur_fence').get('id')); +- } +- tree_view_onclick(self.get('cur_fence').get('id'), true); ++ tree_view_onclick(self.get('cur_fence').get('id'), first_run); + } else { + tree_view_select(self.get('cur_fence').get('id')); + } + } + if (self.get('cur_resource')) { + if (resource_change) { +- if (first_run) { +- update_instance_attributes(self.get('cur_resource').get('id')); 
+- } +- tree_view_onclick(self.get('cur_resource').get('id'), true); ++ tree_view_onclick( ++ self.get('cur_resource').get('id'), first_run ++ ); + } else { + tree_view_select(self.get('cur_resource').get('id')); + } +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index e763482..1ec0f1c 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -2108,29 +2108,26 @@ function update_instance_attributes(resource_id) { + }, res_obj.get("stonith")); + } + +-function tree_view_onclick(resource_id, auto) { +- auto = typeof auto !== 'undefined' ? auto : false; ++function tree_view_onclick(resource_id, first_run) { ++ first_run = typeof first_run !== 'undefined' ? first_run : false; + var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id); + if (!resource_obj) { + console.log("Resource " + resource_id + "not found."); + return; + } + if (resource_obj.get('stonith')) { +- Pcs.resourcesContainer.set('cur_fence', resource_obj); +- if (!auto) { ++ if (!first_run) { + window.location.hash = "/fencedevices/" + resource_id; +- update_instance_attributes(resource_id); + } ++ Pcs.resourcesContainer.set('cur_fence', resource_obj); + } else { +- Pcs.resourcesContainer.set('cur_resource', resource_obj); +- +- if (!auto) { ++ if (!first_run) { + window.location.hash = "/resources/" + resource_id; +- update_instance_attributes(resource_id); + } ++ Pcs.resourcesContainer.set('cur_resource', resource_obj); + auto_show_hide_constraints(); + } +- ++ update_instance_attributes(resource_id); + tree_view_select(resource_id); + } + +-- +1.8.3.1 + diff --git a/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch b/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch new file mode 100644 index 0000000..7f7c9a6 --- /dev/null +++ b/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch @@ -0,0 +1,76 @@ +From 590157ae3e595560632ddc25c725b67c42a3f2ab Mon Sep 17 00:00:00 2001 
+From: Ondrej Mular <omular@redhat.com> +Date: Wed, 27 Jul 2016 09:56:55 +0200 +Subject: [PATCH] web UI: don't change current resource in URL if not in + resources tab + +--- + pcsd/public/js/nodes-ember.js | 6 ++---- + pcsd/public/js/pcsd.js | 11 +++++------ + 2 files changed, 7 insertions(+), 10 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 2b43559..efc0192 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -210,16 +210,14 @@ Pcs = Ember.Application.createWithMixins({ + Ember.run.scheduleOnce('afterRender', Pcs, function () { + if (self.get('cur_fence')) { + if (fence_change) { +- tree_view_onclick(self.get('cur_fence').get('id'), first_run); ++ tree_view_onclick(self.get('cur_fence').get('id')); + } else { + tree_view_select(self.get('cur_fence').get('id')); + } + } + if (self.get('cur_resource')) { + if (resource_change) { +- tree_view_onclick( +- self.get('cur_resource').get('id'), first_run +- ); ++ tree_view_onclick(self.get('cur_resource').get('id')); + } else { + tree_view_select(self.get('cur_resource').get('id')); + } +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index c8ed340..a646bed 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1134,8 +1134,8 @@ function hover_out(o) { + } + + function reload_current_resource() { +- tree_view_onclick(curResource(), true); +- tree_view_onclick(curStonith(), true); ++ tree_view_onclick(curResource()); ++ tree_view_onclick(curStonith()); + } + + function load_row(node_row, ac, cur_elem, containing_elem, also_set, initial_load){ +@@ -2112,20 +2112,19 @@ function update_instance_attributes(resource_id) { + }, res_obj.get("stonith")); + } + +-function tree_view_onclick(resource_id, first_run) { +- first_run = typeof first_run !== 'undefined' ? 
first_run : false; ++function tree_view_onclick(resource_id) { + var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id); + if (!resource_obj) { + console.log("Resource " + resource_id + "not found."); + return; + } + if (resource_obj.get('stonith')) { +- if (!first_run) { ++ if (window.location.hash.startsWith("#/fencedevices")) { + window.location.hash = "/fencedevices/" + resource_id; + } + Pcs.resourcesContainer.set('cur_fence', resource_obj); + } else { +- if (!first_run) { ++ if (window.location.hash.startsWith("#/resources")) { + window.location.hash = "/resources/" + resource_id; + } + Pcs.resourcesContainer.set('cur_resource', resource_obj); +-- +1.8.3.1 + diff --git a/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch b/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch new file mode 100644 index 0000000..99c01e2 --- /dev/null +++ b/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch @@ -0,0 +1,401 @@ +From 0d440890ade31a2050ac861270a39be5c91d4bbb Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Wed, 14 Sep 2016 15:29:06 +0200 +Subject: [PATCH] squash bz1231858 resource/fence agent options form + +6007fba70212 web UI: treat resource as managed by default + +f1b60c3a2bac WebUI: fix node standby for pcs 0.9.138 and older + +73adbedf268e webUI: allow change groups, clone and unclone of resource on clusters running older pcsd + +1302b4e62e19 webUI: fix group list when managing cluster running older pcsd + +f639c0dded12 webUI: don't show group selector in case cluster doesn't support it + +584092ce7d04 webUI: consolidate backward compatibility code +--- + pcsd/cluster_entity.rb | 2 +- + pcsd/pcs.rb | 20 ++++- + pcsd/pcsd.rb | 169 +++++++++++++++++++++++++++++++++++++----- + pcsd/public/js/nodes-ember.js | 11 ++- + pcsd/remote.rb | 6 +- + pcsd/views/main.erb | 20 ++--- + 6 files changed, 194 insertions(+), 34 deletions(-) + +diff --git 
a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index 4ffcd4b..b8f363a 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -120,7 +120,7 @@ module ClusterEntity + status = ClusterEntity::CRMResourceStatus.new + status.id = primitive.id + status.resource_agent = primitive.agentname +- status.managed = false ++ status.managed = true + status.failed = resource[:failed] + status.role = nil + status.active = resource[:active] +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 137bb3d..e05f3ef 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1864,7 +1864,7 @@ end + def status_v1_to_v2(status) + new_status = status.select { |k,_| + [:cluster_name, :username, :is_cman_with_udpu_transport, +- :need_ring1_address, :cluster_settings, :constraints, :groups, ++ :need_ring1_address, :cluster_settings, :constraints, + :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby, + :pacemaker_offline, :acls, :fence_levels + ].include?(k) +@@ -1885,6 +1885,8 @@ def status_v1_to_v2(status) + ].include?(k) + } + ++ new_status[:groups] = get_group_list_from_tree_of_resources(resources) ++ + new_status[:node].update( + { + :id => status[:node_id], +@@ -1901,6 +1903,22 @@ def status_v1_to_v2(status) + return new_status + end + ++def get_group_list_from_tree_of_resources(tree) ++ group_list = [] ++ tree.each { |resource| ++ if resource.instance_of?(ClusterEntity::Group) ++ group_list << resource.id ++ end ++ if ( ++ resource.kind_of?(ClusterEntity::MultiInstance) and ++ resource.member.instance_of?(ClusterEntity::Group) ++ ) ++ group_list << resource.member.id ++ end ++ } ++ return group_list ++end ++ + def allowed_for_local_cluster(auth_user, action) + pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text()) + return pcs_config.permissions_local.allows?( +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index 287cf03..dcfd5a0 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -908,7 +908,7 @@ already been added to pcsd. 
You may not add two clusters with the same name int + 'type' => 'boolean', + 'shortdesc' => 'Should deleted actions be cancelled', + 'longdesc' => 'Should deleted actions be cancelled', +- 'readable_name' => 'top Orphan Actions', ++ 'readable_name' => 'Stop Orphan Actions', + 'advanced' => false + }, + 'start-failure-is-fatal' => { +@@ -1215,33 +1215,168 @@ already been added to pcsd. You may not add two clusters with the same name int + return [200, "Node added successfully."] + end + ++ def pcs_0_9_142_resource_change_group(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_group => '', ++ :_orig_resource_group => '', ++ } ++ parameters[:resource_group] = params[:group_id] if params[:group_id] ++ if params[:old_group_id] ++ parameters[:_orig_resource_group] = params[:old_group_id] ++ end ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ def pcs_0_9_142_resource_clone(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_clone => true, ++ :_orig_resource_clone => 'false', ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ def pcs_0_9_142_resource_unclone(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_clone => nil, ++ :_orig_resource_clone => 'true', ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ def pcs_0_9_142_resource_master(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_ms => true, ++ :_orig_resource_ms => 'false', ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ # There is a bug in pcs-0.9.138 and older in processing the standby and ++ # unstandby request. 
JS of that pcsd always sent nodename in "node" ++ # parameter, which caused pcsd daemon to run the standby command locally with ++ # param["node"] as node name. This worked fine if the local cluster was ++ # managed from JS, as pacemaker simply put the requested node into standby. ++ # However it didn't work for managing non-local clusters, as the command was ++ # run on the local cluster everytime. Pcsd daemon would send the request to a ++ # remote cluster if the param["name"] variable was set, and that never ++ # happened. That however wouldn't work either, as then the required parameter ++ # "node" wasn't sent in the request causing an exception on the receiving ++ # node. This is fixed in commit 053f63ca109d9ef9e7f0416e90aab8e140480f5b ++ # ++ # In order to be able to put nodes running pcs-0.9.138 into standby, the ++ # nodename must be sent in "node" param, and the "name" must not be sent. ++ def pcs_0_9_138_node_standby(auth_user, params) ++ translated_params = { ++ 'node' => params[:name], ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'node_standby', true, translated_params ++ ) ++ end ++ ++ def pcs_0_9_138_node_unstandby(auth_user, params) ++ translated_params = { ++ 'node' => params[:name], ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'node_unstandby', true, translated_params ++ ) ++ end ++ + post '/managec/:cluster/?*' do + auth_user = PCSAuth.sessionToAuthUser(session) + raw_data = request.env["rack.input"].read + if params[:cluster] + request = "/" + params[:splat].join("/") +- code, out = send_cluster_request_with_token( +- auth_user, params[:cluster], request, true, params, true, raw_data +- ) + + # backward compatibility layer BEGIN +- # This code correctly remove constraints on pcs/pcsd version 0.9.137 and older +- redirection = { +- "/remove_constraint_remote" => "/resource_cmd/rm_constraint", +- "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule" ++ 
translate_for_version = { ++ '/node_standby' => [ ++ [[0, 9, 138], method(:pcs_0_9_138_node_standby)], ++ ], ++ '/node_unstandby' => [ ++ [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)], ++ ], + } +- if code == 404 and redirection.key?(request) ++ if translate_for_version.key?(request) ++ target_pcsd_version = [0, 0, 0] ++ version_code, version_out = send_cluster_request_with_token( ++ auth_user, params[:cluster], 'get_sw_versions' ++ ) ++ if version_code == 200 ++ begin ++ versions = JSON.parse(version_out) ++ target_pcsd_version = versions['pcs'] if versions['pcs'] ++ rescue JSON::ParserError ++ end ++ end ++ translate_function = nil ++ translate_for_version[request].each { |pair| ++ if (target_pcsd_version <=> pair[0]) != 1 # target <= pair ++ translate_function = pair[1] ++ break ++ end ++ } ++ end ++ # backward compatibility layer END ++ ++ if translate_function ++ code, out = translate_function.call(auth_user, params) ++ else + code, out = send_cluster_request_with_token( +- auth_user, +- params[:cluster], +- redirection[request], +- true, +- params, +- false, +- raw_data ++ auth_user, params[:cluster], request, true, params, true, raw_data + ) + end +- # bcl END ++ ++ # backward compatibility layer BEGIN ++ if code == 404 ++ case request ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_change_group' ++ code, out = pcs_0_9_142_resource_change_group(auth_user, params) ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_clone' ++ code, out = pcs_0_9_142_resource_clone(auth_user, params) ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_unclone' ++ code, out = pcs_0_9_142_resource_unclone(auth_user, params) ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_master' ++ code, out = pcs_0_9_142_resource_master(auth_user, params) ++ else ++ redirection = { ++ # constraints removal for pcs-0.9.137 and older ++ "/remove_constraint_remote" => 
"/resource_cmd/rm_constraint", ++ # constraints removal for pcs-0.9.137 and older ++ "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule" ++ } ++ if redirection.key?(request) ++ code, out = send_cluster_request_with_token( ++ auth_user, ++ params[:cluster], ++ redirection[request], ++ true, ++ params, ++ false, ++ raw_data ++ ) ++ end ++ end ++ end ++ # backward compatibility layer END ++ + return code, out + end + end +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 19caf14..6ef49e2 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -922,6 +922,15 @@ Pcs.ResourceObj = Ember.Object.extend({ + return ""; + } + }.property("status_val"), ++ show_group_selector: function() { ++ var parent = this.get("parent"); ++ return !( ++ parent && ++ parent.is_group && ++ parent.get("parent") && ++ Pcs.resourcesContainer.get("is_version_1") ++ ); ++ }.property(), + + location_constraints: [], + ordering_constraints: [], +@@ -1012,7 +1021,7 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({ + is_unmanaged: function() { + var instance_status_list = this.get("instance_status"); + if (!instance_status_list) { +- return false; ++ return true; + } + var is_managed = true; + $.each(instance_status_list, function(_, instance_status) { +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 7dc7951..97e63f1 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -334,9 +334,8 @@ end + def node_standby(params, request, auth_user) + if params[:name] + code, response = send_request_with_token( +- auth_user, params[:name], 'node_standby', true, {"node"=>params[:name]} ++ auth_user, params[:name], 'node_standby', true + ) +- # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd + else + if not allowed_for_local_cluster(auth_user, Permissions::WRITE) + return 403, 'Permission denied' +@@ -350,9 +349,8 @@ end + def node_unstandby(params, request, auth_user) + if params[:name] + code, 
response = send_request_with_token( +- auth_user, params[:name], 'node_unstandby', true, {"node"=>params[:name]} ++ auth_user, params[:name], 'node_unstandby', true + ) +- # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd + else + if not allowed_for_local_cluster(auth_user, Permissions::WRITE) + return 403, 'Permission denied' +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 8de1c60..a138f68 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -246,7 +246,6 @@ + <td class="bold" nowrap>Current Location:</td> + <td id="cur_res_loc" class="reg">{{resource.nodes_running_on_string}}</td> + </tr> +- {{#unless old_pcsd}} + {{#unless resource.parent}} + <tr> + <td class="bold" nowrap>Clone:</td> +@@ -268,6 +267,7 @@ + </tr> + {{else}} + {{#if resource.parent.is_group}} ++ {{#if resource.show_group_selector}} + <tr> + <td class="bold" nowrap>Group:</td> + <td id="cur_res_loc" class="reg"> +@@ -275,11 +275,10 @@ + </td> + </tr> + {{/if}} +- {{/unless}} ++ {{/if}} + {{/unless}} + {{/if}} + {{/unless}} +- {{#unless old_pcsd}} + {{#if resource.is_group}} + {{#unless resource.parent}} + <tr> +@@ -294,12 +293,14 @@ + <input type="button" onclick="resource_master(curResource());" value="Create master/slave"> + </td> + </tr> +- <tr> +- <td class="bold" nowrap>Group:</td> +- <td id="cur_res_loc" class="reg"> +- <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup"> +- </td> +- </tr> ++ {{#unless old_pcsd}} ++ <tr> ++ <td class="bold" nowrap>Group:</td> ++ <td id="cur_res_loc" class="reg"> ++ <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup"> ++ </td> ++ </tr> ++ {{/unless}} + {{/unless}} + {{/if}} + {{#if resource.is_multi_instance}} +@@ -310,7 +311,6 @@ + </td> + </tr> + {{/if}} +- {{/unless}} + </table> + {{#unless resource.stonith}} + {{location_constraints-table constraints=resource.location_constraints}} +-- +1.8.3.1 + diff --git 
a/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch b/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch deleted file mode 100644 index 1850941..0000000 --- a/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch +++ /dev/null @@ -1,189 +0,0 @@ -From 082be752ee38c8d1314c2130a029e60648f7896b Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Tue, 11 Aug 2015 16:34:02 +0200 -Subject: [PATCH] add nagios support to 'pcs resource list' and web UI - ---- - pcs/resource.py | 58 ++++++++++++++++++++++++++++++++++++++++++-------------- - pcsd/remote.rb | 4 ++++ - pcsd/resource.rb | 23 ++++++++++++++++++---- - pcsd/settings.rb | 1 + - 4 files changed, 68 insertions(+), 18 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index f7d8821..8e05aeb 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -198,13 +198,28 @@ def parse_resource_options(argv, with_clone=False): - # List available resources - # TODO make location more easily configurable - def resource_list_available(argv): -+ def get_name_and_desc(full_res_name, metadata): -+ sd = "" -+ try: -+ dom = parseString(metadata) -+ shortdesc = dom.documentElement.getElementsByTagName("shortdesc") -+ if len(shortdesc) > 0: -+ sd = " - " + format_desc( -+ len(full_res_name + " - "), -+ shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ") -+ ) -+ except xml.parsers.expat.ExpatError: -+ sd = "" -+ finally: -+ return full_res_name + sd + "\n" -+ - ret = "" - if len(argv) != 0: - filter_string = argv[0] - else: - filter_string = "" - --# ocf agents -+ # ocf agents - os.environ['OCF_ROOT'] = "/usr/lib/ocf/" - providers = sorted(os.listdir("/usr/lib/ocf/resource.d")) - for provider in providers: -@@ -223,32 +238,47 @@ def resource_list_available(argv): - metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource) - if metadata == False: - continue -- sd = "" -- try: -- dom = 
parseString(metadata) -- shortdesc = dom.documentElement.getElementsByTagName("shortdesc") -- if len(shortdesc) > 0: -- sd = " - " + format_desc(full_res_name.__len__() + 3, shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ")) -- except xml.parsers.expat.ExpatError: -- sd = "" -- finally: -- ret += full_res_name + sd + "\n" --# lsb agents -+ ret += get_name_and_desc( -+ "ocf:" + provider + ":" + resource, -+ metadata -+ ) -+ -+ # lsb agents - lsb_dir = "/etc/init.d/" - agents = sorted(os.listdir(lsb_dir)) - for agent in agents: - if os.access(lsb_dir + agent, os.X_OK): - ret += "lsb:" + agent + "\n" --# systemd agents -+ -+ # systemd agents - if utils.is_systemctl(): - agents, retval = utils.run(["systemctl", "list-unit-files", "--full"]) - agents = agents.split("\n") -- - for agent in agents: - match = re.search(r'^([\S]*)\.service',agent) - if match: - ret += "systemd:" + match.group(1) + "\n" - -+ # nagios metadata -+ nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata" -+ for metadata_file in sorted(os.listdir(nagios_metadata_path)): -+ if metadata_file.startswith("."): -+ continue -+ full_res_name = "nagios:" + metadata_file -+ if full_res_name.lower().endswith(".xml"): -+ full_res_name = full_res_name[:-len(".xml")] -+ if "--nodesc" in utils.pcs_options: -+ ret += full_res_name + "\n" -+ continue -+ try: -+ ret += get_name_and_desc( -+ full_res_name, -+ open(os.path.join(nagios_metadata_path, metadata_file), "r").read() -+ ) -+ except EnvironmentError as e: -+ pass -+ -+ # output - if not ret: - utils.err( - "No resource agents available. 
" -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 5b7c753..cb5b176 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -1373,6 +1373,8 @@ def resource_form(params, request, session) - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + @cur_resource.type) - elsif @cur_resource.provider == 'pacemaker' - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + @cur_resource.type) -+ elsif @cur_resource._class == 'nagios' -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') - end - @existing_resource = true - if @resource -@@ -1546,6 +1548,8 @@ def resource_metadata(params, request, session) - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + resource_name) - elsif class_provider == "ocf:pacemaker" - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + resource_name) -+ elsif class_provider == 'nagios' -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + resource_name + '.xml') - end - @new_resource = params[:new] - @resources, @groups = getResourcesGroups(session) -diff --git a/pcsd/resource.rb b/pcsd/resource.rb -index f375bae..c6b513b 100644 ---- a/pcsd/resource.rb -+++ b/pcsd/resource.rb -@@ -303,13 +303,28 @@ def getColocationConstraints(session, resource_id) - end - - def getResourceMetadata(resourcepath) -- ENV['OCF_ROOT'] = OCF_ROOT -- metadata = `#{resourcepath} meta-data` -- doc = REXML::Document.new(metadata) - options_required = {} - options_optional = {} - long_desc = "" - short_desc = "" -+ -+ if resourcepath.end_with?('.xml') -+ begin -+ metadata = IO.read(resourcepath) -+ rescue -+ metadata = "" -+ end -+ else -+ ENV['OCF_ROOT'] = OCF_ROOT 
-+ metadata = `#{resourcepath} meta-data` -+ end -+ -+ begin -+ doc = REXML::Document.new(metadata) -+ rescue REXML::ParseException -+ return [options_required, options_optional, [short_desc, long_desc]] -+ end -+ - doc.elements.each('resource-agent/longdesc') {|ld| - long_desc = ld.text ? ld.text.strip : ld.text - } -@@ -345,7 +360,7 @@ def getResourceMetadata(resourcepath) - options_optional[param.attributes["name"]] = temp_array - end - } -- [options_required, options_optional, [short_desc,long_desc]] -+ [options_required, options_optional, [short_desc, long_desc]] - end - - def getResourceAgents(session, resource_agent=nil) -diff --git a/pcsd/settings.rb b/pcsd/settings.rb -index 0cd3109..4cea800 100644 ---- a/pcsd/settings.rb -+++ b/pcsd/settings.rb -@@ -8,6 +8,7 @@ COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret' - OCF_ROOT = "/usr/lib/ocf" - HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" - PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" -+NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/' - PENGINE = "/usr/libexec/pacemaker/pengine" - CRM_MON = "/usr/sbin/crm_mon" - CRM_NODE = "/usr/sbin/crm_node" --- -1.9.1 - diff --git a/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch b/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch deleted file mode 100644 index 9fc4091..0000000 --- a/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 2c269bd74344dab5b55f398c90ab0077b3d31e21 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Fri, 4 Sep 2015 12:59:41 +0200 -Subject: [PATCH] fix crash when missing nagios-metadata - ---- - pcs/resource.py | 36 ++++++++++++++++++++---------------- - 1 file changed, 20 insertions(+), 16 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index 8e05aeb..2dcddc3 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -261,22 +261,26 @@ def resource_list_available(argv): - - 
# nagios metadata - nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata" -- for metadata_file in sorted(os.listdir(nagios_metadata_path)): -- if metadata_file.startswith("."): -- continue -- full_res_name = "nagios:" + metadata_file -- if full_res_name.lower().endswith(".xml"): -- full_res_name = full_res_name[:-len(".xml")] -- if "--nodesc" in utils.pcs_options: -- ret += full_res_name + "\n" -- continue -- try: -- ret += get_name_and_desc( -- full_res_name, -- open(os.path.join(nagios_metadata_path, metadata_file), "r").read() -- ) -- except EnvironmentError as e: -- pass -+ if os.path.isdir(nagios_metadata_path): -+ for metadata_file in sorted(os.listdir(nagios_metadata_path)): -+ if metadata_file.startswith("."): -+ continue -+ full_res_name = "nagios:" + metadata_file -+ if full_res_name.lower().endswith(".xml"): -+ full_res_name = full_res_name[:-len(".xml")] -+ if "--nodesc" in utils.pcs_options: -+ ret += full_res_name + "\n" -+ continue -+ try: -+ ret += get_name_and_desc( -+ full_res_name, -+ open( -+ os.path.join(nagios_metadata_path, metadata_file), -+ "r" -+ ).read() -+ ) -+ except EnvironmentError as e: -+ pass - - # output - if not ret: --- -1.9.1 - diff --git a/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch b/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch deleted file mode 100644 index 509061d..0000000 --- a/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch +++ /dev/null @@ -1,44 +0,0 @@ -From fc89908d91a2438f59dd08cf79aedfb85512091b Mon Sep 17 00:00:00 2001 -From: Chris Feist <cfeist@redhat.com> -Date: Fri, 18 Sep 2015 16:29:58 -0500 -Subject: [PATCH] Added more detailed warnings for 'pcs stonith confirm' - ---- - pcs/pcs.8 | 4 +++- - pcs/usage.py | 5 ++++- - 2 files changed, 7 insertions(+), 2 deletions(-) - -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index 70f0f6c..e89c813 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -333,7 +333,9 @@ fence 
<node> [\fB\-\-off\fR] - Fence the node specified (if \fB\-\-off\fR is specified, use the 'off' API call to stonith which will turn the node off instead of rebooting it) - .TP - confirm <node> --Confirm that the host specified is currently down. WARNING: if this node is not actually down data corruption/cluster failure can occur. -+Confirm that the host specified is currently down. This command should \fBONLY\fR be used when the node specified has already been confirmed to be down. -+ -+.B WARNING: if this node is not actually down data corruption/cluster failure can occur. - .SS "acl" - .TP - [show] -diff --git a/pcs/usage.py b/pcs/usage.py -index c430965..63baa76 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -833,7 +833,10 @@ Commands: - call to stonith which will turn the node off instead of rebooting it) - - confirm <node> -- Confirm that the host specified is currently down. -+ Confirm that the host specified is currently down. This command -+ should ONLY be used when the node specified has already been -+ confirmed to be down. -+ - WARNING: if this node is not actually down data corruption/cluster - failure can occur. 
- --- -1.9.1 - diff --git a/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch b/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch new file mode 100644 index 0000000..4eedb64 --- /dev/null +++ b/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch @@ -0,0 +1,102 @@ +From d1a31c8b887fc668eff8ef582124a84524a5b760 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Mon, 22 Aug 2016 15:52:08 +0200 +Subject: [PATCH] fix error message in node maintenance/unmaintenance commands + +--- + pcs/node.py | 23 ++++++++++++++--------- + pcs/test/test_node.py | 10 ++++++++-- + 2 files changed, 22 insertions(+), 11 deletions(-) + +diff --git a/pcs/node.py b/pcs/node.py +index be2fb13..ed77d5d 100644 +--- a/pcs/node.py ++++ b/pcs/node.py +@@ -77,8 +77,8 @@ def node_maintenance(argv, on=True): + for node in argv: + if node not in cluster_nodes: + utils.err( +- "Node '%s' does not appear to exist in configuration" % +- argv[0], ++ "Node '{0}' does not appear to exist in " ++ "configuration".format(node), + False + ) + failed_count += 1 +@@ -87,25 +87,30 @@ def node_maintenance(argv, on=True): + else: + nodes.append("") + ++ if failed_count > 0: ++ sys.exit(1) ++ + for node in nodes: +- node = ["-N", node] if node else [] ++ node_attr = ["-N", node] if node else [] + output, retval = utils.run( + ["crm_attribute", "-t", "nodes", "-n", "maintenance"] + action + +- node ++ node_attr + ) + if retval != 0: +- node_name = ("node '%s'" % node) if argv else "current node" ++ node_name = ("node '{0}'".format(node)) if argv else "current node" + failed_count += 1 + if on: + utils.err( +- "Unable to put %s to maintenance mode.\n%s" % +- (node_name, output), ++ "Unable to put {0} to maintenance mode: {1}".format( ++ node_name, output ++ ), + False + ) + else: + utils.err( +- "Unable to remove %s from maintenance mode.\n%s" % +- (node_name, output), ++ "Unable to remove 
{0} from maintenance mode: {1}".format( ++ node_name, output ++ ), + False + ) + if failed_count > 0: +diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py +index 6f03112..785c711 100644 +--- a/pcs/test/test_node.py ++++ b/pcs/test/test_node.py +@@ -88,11 +88,14 @@ Node Attributes: + """ + ac(expected_out, output) + +- output, returnVal = pcs(temp_cib, "node maintenance nonexistant-node") ++ output, returnVal = pcs( ++ temp_cib, "node maintenance nonexistant-node and-another" ++ ) + self.assertEqual(returnVal, 1) + self.assertEqual( + output, + "Error: Node 'nonexistant-node' does not appear to exist in configuration\n" ++ "Error: Node 'and-another' does not appear to exist in configuration\n" + ) + output, _ = pcs(temp_cib, "property") + expected_out = """\ +@@ -134,11 +137,14 @@ Cluster Properties: + """ + ac(expected_out, output) + +- output, returnVal = pcs(temp_cib, "node unmaintenance nonexistant-node") ++ output, returnVal = pcs( ++ temp_cib, "node unmaintenance nonexistant-node and-another" ++ ) + self.assertEqual(returnVal, 1) + self.assertEqual( + output, + "Error: Node 'nonexistant-node' does not appear to exist in configuration\n" ++ "Error: Node 'and-another' does not appear to exist in configuration\n" + ) + output, _ = pcs(temp_cib, "property") + expected_out = """\ +-- +1.8.3.1 + diff --git a/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch b/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch deleted file mode 100644 index 1b6aa4f..0000000 --- a/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch +++ /dev/null @@ -1,259 +0,0 @@ -From b47f6196aaf405f17197d4bb312d94ec84042343 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Tue, 25 Aug 2015 16:46:46 +0200 -Subject: [PATCH] fixed command injection vulnerability - ---- - pcsd/fenceagent.rb | 53 ++++++++++++++++++++++++++++++++++------------------- - pcsd/pcsd.rb | 6 +++--- - pcsd/remote.rb | 18 +++++++++--------- - 
pcsd/resource.rb | 27 +++++++++++++++++++++++---- - 4 files changed, 69 insertions(+), 35 deletions(-) - -diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb -index b7674fd..b52ad6f 100644 ---- a/pcsd/fenceagent.rb -+++ b/pcsd/fenceagent.rb -@@ -1,4 +1,4 @@ --def getFenceAgents(fence_agent = nil) -+def getFenceAgents(session, fence_agent = nil) - fence_agent_list = {} - agents = Dir.glob('/usr/sbin/fence_' + '*') - agents.each { |a| -@@ -7,7 +7,7 @@ def getFenceAgents(fence_agent = nil) - next if fa.name == "fence_ack_manual" - - if fence_agent and a.sub(/.*\//,"") == fence_agent.sub(/.*:/,"") -- required_options, optional_options, advanced_options, info = getFenceAgentMetadata(fa.name) -+ required_options, optional_options, advanced_options, info = getFenceAgentMetadata(session, fa.name) - fa.required_options = required_options - fa.optional_options = optional_options - fa.advanced_options = advanced_options -@@ -18,13 +18,42 @@ def getFenceAgents(fence_agent = nil) - fence_agent_list - end - --def getFenceAgentMetadata(fenceagentname) -+def getFenceAgentMetadata(session, fenceagentname) -+ options_required = {} -+ options_optional = {} -+ options_advanced = { -+ "priority" => "", -+ "pcmk_host_argument" => "", -+ "pcmk_host_map" => "", -+ "pcmk_host_list" => "", -+ "pcmk_host_check" => "" -+ } -+ for a in ["reboot", "list", "status", "monitor", "off"] -+ options_advanced["pcmk_" + a + "_action"] = "" -+ options_advanced["pcmk_" + a + "_timeout"] = "" -+ options_advanced["pcmk_" + a + "_retries"] = "" -+ end -+ - # There are bugs in stonith_admin & the new fence_agents interaction - # eventually we'll want to switch back to this, but for now we directly - # call the agent to get metadata - #metadata = `stonith_admin --metadata -a #{fenceagentname}` -- metadata = `/usr/sbin/#{fenceagentname} -o metadata` -- doc = REXML::Document.new(metadata) -+ if not fenceagentname.start_with?('fence_') or fenceagentname.include?('/') -+ $logger.error "Invalid fence agent 
'#{fenceagentname}'" -+ return [options_required, options_optional, options_advanced] -+ end -+ stdout, stderr, retval = run_cmd( -+ session, "/usr/sbin/#{fenceagentname}", '-o', 'metadata' -+ ) -+ metadata = stdout.join -+ begin -+ doc = REXML::Document.new(metadata) -+ rescue REXML::ParseException => e -+ $logger.error( -+ "Unable to parse metadata of fence agent '#{resourcepath}': #{e}" -+ ) -+ return [options_required, options_optional, options_advanced] -+ end - - short_desc = "" - long_desc = "" -@@ -40,20 +69,6 @@ def getFenceAgentMetadata(fenceagentname) - long_desc = ld.text ? ld.text.strip : ld.text - } - -- options_required = {} -- options_optional = {} -- options_advanced = { -- "priority" => "", -- "pcmk_host_argument" => "", -- "pcmk_host_map" => "", -- "pcmk_host_list" => "", -- "pcmk_host_check" => "" -- } -- for a in ["reboot", "list", "status", "monitor", "off"] -- options_advanced["pcmk_" + a + "_action"] = "" -- options_advanced["pcmk_" + a + "_timeout"] = "" -- options_advanced["pcmk_" + a + "_retries"] = "" -- end - doc.elements.each('resource-agent/parameters/parameter') { |param| - temp_array = [] - if param.elements["shortdesc"] -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index e4b4c25..1f26fe5 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -401,7 +401,7 @@ if not DISABLE_GUI - - if @resources.length == 0 - @cur_resource = nil -- @resource_agents = getFenceAgents() -+ @resource_agents = getFenceAgents(session) - else - @cur_resource = @resources[0] - if params[:fencedevice] -@@ -413,7 +413,7 @@ if not DISABLE_GUI - end - end - @cur_resource.options = getResourceOptions(session, @cur_resource.id) -- @resource_agents = getFenceAgents(@cur_resource.agentname) -+ @resource_agents = getFenceAgents(session, @cur_resource.agentname) - end - erb :fencedevices, :layout => :main - end -@@ -477,7 +477,7 @@ if not DISABLE_GUI - # } - # } - @resource_agents = getResourceAgents(session) -- @stonith_agents = getFenceAgents() -+ @stonith_agents = 
getFenceAgents(session) - # @nodes = @nodes.sort_by{|k,v|k} - erb :nodes, :layout => :main - end -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index cb5b176..4655756 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -1370,11 +1370,11 @@ def resource_form(params, request, session) - @cur_resource_ms = @cur_resource.get_master - @resource = ResourceAgent.new(@cur_resource.agentname) - if @cur_resource.provider == 'heartbeat' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + @cur_resource.type) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + @cur_resource.type) - elsif @cur_resource.provider == 'pacemaker' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + @cur_resource.type) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + @cur_resource.type) - elsif @cur_resource._class == 'nagios' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') - end - @existing_resource = true - if @resource -@@ -1395,7 +1395,7 @@ def fence_device_form(params, request, session) - @cur_resource = get_resource_by_id(params[:resource], get_cib_dom(session)) - - if @cur_resource.instance_of?(ClusterEntity::Primitive) and @cur_resource.stonith -- @resource_agents = getFenceAgents(@cur_resource.agentname) -+ @resource_agents = getFenceAgents(session, @cur_resource.agentname) - @existing_resource = true - @fenceagent = @resource_agents[@cur_resource.type] - erb :fenceagentform -@@ -1531,7 +1531,7 @@ def get_avail_fence_agents(params, request, session) - if 
not allowed_for_local_cluster(session, Permissions::READ) - return 403, 'Permission denied' - end -- agents = getFenceAgents() -+ agents = getFenceAgents(session) - return JSON.generate(agents) - end - -@@ -1545,11 +1545,11 @@ def resource_metadata(params, request, session) - - @resource = ResourceAgent.new(params[:resourcename]) - if class_provider == "ocf:heartbeat" -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + resource_name) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + resource_name) - elsif class_provider == "ocf:pacemaker" -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + resource_name) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + resource_name) - elsif class_provider == 'nagios' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + resource_name + '.xml') -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + resource_name + '.xml') - end - @new_resource = params[:new] - @resources, @groups = getResourcesGroups(session) -@@ -1563,7 +1563,7 @@ def fence_device_metadata(params, request, session) - end - return 200 if not params[:resourcename] or params[:resourcename] == "" - @fenceagent = FenceAgent.new(params[:resourcename]) -- @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(params[:resourcename]) -+ @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(session, params[:resourcename]) - @new_fenceagent = params[:new] - - erb :fenceagentform -diff --git 
a/pcsd/resource.rb b/pcsd/resource.rb -index c6b513b..6f8f7fe 100644 ---- a/pcsd/resource.rb -+++ b/pcsd/resource.rb -@@ -1,4 +1,5 @@ - require 'pp' -+require 'pathname' - - def getResourcesGroups(session, get_fence_devices = false, get_all_options = false, - get_operations=false -@@ -302,12 +303,24 @@ def getColocationConstraints(session, resource_id) - return together,apart - end - --def getResourceMetadata(resourcepath) -+def getResourceMetadata(session, resourcepath) - options_required = {} - options_optional = {} - long_desc = "" - short_desc = "" - -+ resourcepath = Pathname.new(resourcepath).cleanpath.to_s -+ resource_dirs = [ -+ HEARTBEAT_AGENTS_DIR, PACEMAKER_AGENTS_DIR, NAGIOS_METADATA_DIR, -+ ] -+ if not resource_dirs.any? { |allowed| resourcepath.start_with?(allowed) } -+ $logger.error( -+ "Unable to get metadata of resource agent '#{resourcepath}': " + -+ 'path not allowed' -+ ) -+ return [options_required, options_optional, [short_desc, long_desc]] -+ end -+ - if resourcepath.end_with?('.xml') - begin - metadata = IO.read(resourcepath) -@@ -316,12 +329,16 @@ def getResourceMetadata(resourcepath) - end - else - ENV['OCF_ROOT'] = OCF_ROOT -- metadata = `#{resourcepath} meta-data` -+ stdout, stderr, retval = run_cmd(session, resourcepath, 'meta-data') -+ metadata = stdout.join - end - - begin - doc = REXML::Document.new(metadata) -- rescue REXML::ParseException -+ rescue REXML::ParseException => e -+ $logger.error( -+ "Unable to parse metadata of resource agent '#{resourcepath}': #{e}" -+ ) - return [options_required, options_optional, [short_desc, long_desc]] - end - -@@ -381,7 +398,9 @@ def getResourceAgents(session, resource_agent=nil) - if resource_agent and (a.start_with?("ocf:heartbeat:") or a.start_with?("ocf:pacemaker:")) - split_agent = ra.name.split(/:/) - path = OCF_ROOT + '/resource.d/' + split_agent[1] + "/" + split_agent[2] -- required_options, optional_options, resource_info = getResourceMetadata(path) -+ required_options, 
optional_options, resource_info = getResourceMetadata( -+ session, path -+ ) - ra.required_options = required_options - ra.optional_options = optional_options - ra.info = resource_info --- -1.9.1 - diff --git a/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch b/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch deleted file mode 100644 index 3483ad3..0000000 --- a/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 7323d4fb2454d65bb26839fd6fb4809d19258d34 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Tue, 25 Aug 2015 14:51:19 +0200 -Subject: [PATCH] fix pcs/pcsd path detection - ---- - pcs/utils.py | 2 +- - pcsd/bootstrap.rb | 4 +++- - 2 files changed, 4 insertions(+), 2 deletions(-) - -diff --git a/pcs/utils.py b/pcs/utils.py -index cd33a27..761723b 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -736,7 +736,7 @@ def run_pcsdcli(command, data=None): - env_var = dict() - if "--debug" in pcs_options: - env_var["PCSD_DEBUG"] = "true" -- pcs_dir = os.path.dirname(sys.argv[0]) -+ pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - if pcs_dir == "/usr/sbin": - pcsd_dir_path = settings.pcsd_exec_location - else: -diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb -index 07a7d27..64c3b98 100644 ---- a/pcsd/bootstrap.rb -+++ b/pcsd/bootstrap.rb -@@ -1,4 +1,5 @@ - require 'logger' -+require 'pathname' - - require 'settings.rb' - -@@ -32,7 +33,8 @@ def is_systemctl() - end - - def get_pcs_path(pcsd_path) -- if PCSD_EXEC_LOCATION == pcsd_path or PCSD_EXEC_LOCATION == (pcsd_path + '/') -+ real_path = Pathname.new(pcsd_path).realpath.to_s -+ if PCSD_EXEC_LOCATION == real_path or PCSD_EXEC_LOCATION == (real_path + '/') - return '/usr/sbin/pcs' - else - return '../pcs/pcs' --- -1.9.1 - diff --git a/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch b/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch deleted file mode 100644 index a3c5cec..0000000 --- 
a/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 122c7b6b5d31fdc0cf997aeb01252fb4c8801da5 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Fri, 4 Sep 2015 17:12:27 +0200 -Subject: [PATCH] always print output of crm_resource --cleanup - ---- - pcs/resource.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index 2dcddc3..be1f1ba 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -2559,14 +2559,14 @@ def resource_cleanup(res_id): - if retval != 0: - utils.err("Unable to cleanup resource: %s" % res_id + "\n" + output) - else: -- print "Resource: %s successfully cleaned up" % res_id -+ print output - - def resource_cleanup_all(): - (output, retval) = utils.run(["crm_resource", "-C"]) - if retval != 0: - utils.err("Unexpected error occured. 'crm_resource -C' err_code: %s\n%s" % (retval, output)) - else: -- print "All resources/stonith devices successfully cleaned up" -+ print output - - def resource_history(args): - dom = utils.get_cib_dom() --- -1.9.1 - diff --git a/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch b/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch deleted file mode 100644 index ae03878..0000000 --- a/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch +++ /dev/null @@ -1,37 +0,0 @@ -commit 4d4ad9fc870998f4e70256ef62371f38da3a4855 -Author: Chris Feist <cfeist@redhat.com> -AuthorDate: Mon Aug 31 15:13:46 2015 -0500 -Commit: Chris Feist <cfeist@redhat.com> -CommitDate: Mon Aug 31 15:13:46 2015 -0500 - - Fix tracebacks during pcsd shutdowns - -diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb -index e948aef..97d131e 100644 ---- a/pcsd/ssl.rb -+++ b/pcsd/ssl.rb -@@ -67,14 +67,20 @@ end - server = ::Rack::Handler::WEBrick - trap(:INT) do - puts "Shutting down (INT)" -- server.shutdown -- #exit -+ if server.instance_variable_get("@server") -+ server.shutdown -+ else -+ exit -+ end - 
end - - trap(:TERM) do - puts "Shutting down (TERM)" -- server.shutdown -- #exit -+ if server.instance_variable_get("@server") -+ server.shutdown -+ else -+ exit -+ end - end - - require 'pcsd' diff --git a/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch b/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch new file mode 100644 index 0000000..78764a3 --- /dev/null +++ b/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch @@ -0,0 +1,320 @@ +From cf1c95354a9db8b81712d7b98d0cc55e777e0516 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Thu, 4 Aug 2016 00:59:11 +0200 +Subject: [PATCH] web UI: add support for unmanaged resources + +--- + pcsd/cluster_entity.rb | 13 ++++++++-- + pcsd/pcs.rb | 1 + + pcsd/public/js/nodes-ember.js | 22 +++++++++++++---- + pcsd/public/js/pcsd.js | 52 ++++++++++++++++++++++++++++++++++++++++ + pcsd/remote.rb | 55 +++++++++++++++++++++++++++++++++++++++---- + pcsd/views/main.erb | 26 ++++++++++++++++++++ + 6 files changed, 158 insertions(+), 11 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index fa56fe2..7216626 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -332,7 +332,11 @@ module ClusterEntity + :unknown => { + :val => 6, + :str => 'unknown' +- } ++ }, ++ :unmanaged => { ++ :val => 7, ++ :str => 'unmanaged' ++ }, + } + + def initialize(status=:unknown) +@@ -532,8 +536,11 @@ module ClusterEntity + def get_status + running = 0 + failed = 0 ++ unmanaged = 0 + @crm_status.each do |s| +- if s.active ++ if !s.managed ++ unmanaged += 1 ++ elsif s.active + running += 1 + elsif s.failed + failed += 1 +@@ -542,6 +549,8 @@ module ClusterEntity + + if disabled? 
+ status = ClusterEntity::ResourceStatus.new(:disabled) ++ elsif unmanaged >0 ++ status = ClusterEntity::ResourceStatus.new(:unmanaged) + elsif running > 0 + status = ClusterEntity::ResourceStatus.new(:running) + elsif failed > 0 or @error_list.length > 0 +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 1eb9e9e..553a20c 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1703,6 +1703,7 @@ def get_node_status(auth_user, cib_dom) + 'sbd', + 'ticket_constraints', + 'moving_resource_in_group', ++ 'unmanaged_resource', + ] + } + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 3d4fe79..c51a341 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -57,6 +57,9 @@ Pcs = Ember.Application.createWithMixins({ + this.get("available_features").indexOf("moving_resource_in_group") != -1 + ); + }.property("available_features"), ++ is_supported_unmanaged_resource: function() { ++ return (this.get("available_features").indexOf("unmanaged_resource") != -1); ++ }.property("available_features"), + is_sbd_running: false, + is_sbd_enabled: false, + is_sbd_enabled_or_running: function() { +@@ -869,9 +872,17 @@ Pcs.ResourceObj = Ember.Object.extend({ + return '<span style="' + this.get('status_style') + '">' + this.get('status') + '</span>'; + }.property("status_style", "disabled"), + status_class: function() { +- var show = ((Pcs.clusterController.get("show_all_resources"))? "" : "hidden "); +- return ((this.get("status_val") == get_status_value("ok") || this.status == "disabled") ? show + "default-hidden" : ""); +- }.property("status_val"), ++ if ( ++ this.get("status_val") == get_status_value("ok") || ++ ["disabled", "unmanaged"].indexOf(this.get("status")) != -1 ++ ) { ++ return ( ++ Pcs.clusterController.get("show_all_resources") ? 
"" : "hidden " ++ ) + "default-hidden"; ++ } else { ++ return ""; ++ } ++ }.property("status_val", "status"), + status_class_fence: function() { + var show = ((Pcs.clusterController.get("show_all_fence"))? "" : "hidden "); + return ((this.get("status_val") == get_status_value("ok")) ? show + "default-hidden" : ""); +@@ -1681,8 +1692,9 @@ Pcs.Cluster = Ember.Object.extend({ + var num = 0; + $.each(this.get(type), function(key, value) { + if (value.get("status_val") < get_status_value("ok") && +- value.status != "disabled" && value.status != "standby" && +- value.status != "maintenance" ++ [ ++ "unmanaged", "disabled", "standby", "maintenance" ++ ].indexOf(value.status) == -1 + ) { + num++; + } +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 82187ef..56219d4 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1333,6 +1333,9 @@ function remove_resource(ids, force) { + message += "\n\n" + xhr.responseText.replace( + "--force", "'Enforce removal'" + ); ++ alert(message); ++ $("#verify_remove_submit_btn").button("option", "disabled", false); ++ return; + } + } + alert(message); +@@ -1957,6 +1960,7 @@ function get_status_value(status) { + maintenance: 2, + "partially running": 2, + disabled: 3, ++ unmanaged: 3, + unknown: 4, + ok: 5, + running: 5, +@@ -2987,3 +2991,51 @@ function sbd_status_dialog() { + buttons: buttonsOpts + }); + } ++ ++function unmanage_resource(resource_id) { ++ if (!resource_id) { ++ return; ++ } ++ fade_in_out("#resource_unmanage_link"); ++ ajax_wrapper({ ++ type: 'POST', ++ url: get_cluster_remote_url() + "unmanage_resource", ++ data: { ++ resource_list_json: JSON.stringify([resource_id]), ++ }, ++ timeout: pcs_timeout, ++ complete: function() { ++ Pcs.update(); ++ }, ++ error: function (xhr, status, error) { ++ alert( ++ `Unable to unmanage '${resource_id}': ` + ++ ajax_simple_error(xhr, status, error) ++ ); ++ }, ++ }); ++} ++ ++function manage_resource(resource_id) { ++ if (!resource_id) { ++ return; ++ } 
++ fade_in_out("#resource_manage_link"); ++ ajax_wrapper({ ++ type: 'POST', ++ url: get_cluster_remote_url() + "manage_resource", ++ data: { ++ resource_list_json: JSON.stringify([resource_id]), ++ }, ++ timeout: pcs_timeout, ++ complete: function() { ++ Pcs.update(); ++ }, ++ error: function (xhr, status, error) { ++ alert( ++ `Unable to manage '${resource_id}': ` + ++ ajax_simple_error(xhr, status, error) ++ ); ++ } ++ }); ++} +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 4844adf..ebf425c 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -116,7 +116,9 @@ def remote(params, request, auth_user) + :set_resource_utilization => method(:set_resource_utilization), + :set_node_utilization => method(:set_node_utilization), + :get_resource_agent_metadata => method(:get_resource_agent_metadata), +- :get_fence_agent_metadata => method(:get_fence_agent_metadata) ++ :get_fence_agent_metadata => method(:get_fence_agent_metadata), ++ :manage_resource => method(:manage_resource), ++ :unmanage_resource => method(:unmanage_resource), + } + + command = params[:command].to_sym +@@ -1575,10 +1577,10 @@ def remove_resource(params, request, auth_user) + end + cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable'] + resource_list.each { |resource| +- _, err, retval = run_cmd(user, *(cmd + [resource])) ++ out, err, retval = run_cmd(user, *(cmd + [resource])) + if retval != 0 + unless ( +- err.join('').index('unable to find a resource') != -1 and ++ (out + err).join('').include?(' does not exist.') and + no_error_if_not_exists + ) + errors += "Unable to stop resource '#{resource}': #{err.join('')}" +@@ -1613,7 +1615,10 @@ def remove_resource(params, request, auth_user) + end + out, err, retval = run_cmd(auth_user, *cmd) + if retval != 0 +- unless out.index(' does not exist.') != -1 and no_error_if_not_exists ++ unless ( ++ (out + err).join('').include?(' does not exist.') and ++ no_error_if_not_exists ++ ) + errors += err.join(' ').strip + "\n" + end + end +@@ -2630,3 +2635,45 
@@ def qdevice_client_start(param, request, auth_user) + return [400, msg] + end + end ++ ++def manage_resource(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ unless param[:resource_list_json] ++ return [400, "Required parameter 'resource_list_json' is missing."] ++ end ++ begin ++ resource_list = JSON.parse(param[:resource_list_json]) ++ _, err, retval = run_cmd( ++ auth_user, PCS, 'resource', 'manage', *resource_list ++ ) ++ if retval != 0 ++ return [400, err.join('')] ++ end ++ return [200, ''] ++ rescue JSON::ParserError ++ return [400, 'Invalid input data format'] ++ end ++end ++ ++def unmanage_resource(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ unless param[:resource_list_json] ++ return [400, "Required parameter 'resource_list_json' is missing."] ++ end ++ begin ++ resource_list = JSON.parse(param[:resource_list_json]) ++ _, err, retval = run_cmd( ++ auth_user, PCS, 'resource', 'unmanage', *resource_list ++ ) ++ if retval != 0 ++ return [400, err.join('')] ++ end ++ return [200, ''] ++ rescue JSON::ParserError ++ return [400, 'Invalid input data format'] ++ end ++end +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 1b21f92..64fe560 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -160,6 +160,7 @@ + </table> + </div> + <div id="node_options_buttons"> ++ <div> + {{#if resource.stonith}} + <div class="xdark sprites" style="float: left"></div> + <div id="stonith_delete_link" class="link" onclick="verify_remove_fence_devices(curStonith());">Remove</div> +@@ -174,7 +175,32 @@ + <div id="resource_cleanup_link" class="link" onclick="cleanup_resource();">Cleanup</div> + <div class="xdark sprites" style="float: left"></div> + <div id="resource_delete_link" class="link" onclick="verify_remove_resources(curResource());">Remove</div> ++ </div> ++ <div> ++ 
{{#if Pcs.is_supported_unmanaged_resource}} ++ <div> ++ <div class="checkdark sprites" style="float: left"></div> ++ <div ++ id="resource_manage_link" ++ class="link" ++ onclick="manage_resource(curResource());" ++ > ++ Manage ++ </div> ++ </div> ++ <div> ++ <div class="cancel sprites" style="float: left"></div> ++ <div ++ id="resource_unmanage_link" ++ class="link" ++ onclick="unmanage_resource(curResource());" ++ > ++ Unmanage ++ </div> ++ </div> ++ {{/if}} + {{/if}} ++ </div> + <!-- + <div class="move sprites" style="float: left"></div> + <div id="resource_move_link" class="link">Move</div> +-- +1.8.3.1 + diff --git a/SOURCES/bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch b/SOURCES/bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch new file mode 100644 index 0000000..08e123f --- /dev/null +++ b/SOURCES/bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch @@ -0,0 +1,185 @@ +From 563a2c51877b9cf2a5ae419fc6d4eeb680eed04f Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Wed, 24 Aug 2016 10:04:01 +0200 +Subject: [PATCH] web UI: change way of displaying status of unmanaged + primitive resources + +--- + pcsd/cluster_entity.rb | 11 +---------- + pcsd/public/js/nodes-ember.js | 27 ++++++++++++++++++++++----- + pcsd/public/js/pcsd.js | 10 ++++++---- + pcsd/views/manage.erb | 15 ++++++++++----- + 4 files changed, 39 insertions(+), 24 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index 7216626..4ffcd4b 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -333,10 +333,6 @@ module ClusterEntity + :val => 6, + :str => 'unknown' + }, +- :unmanaged => { +- :val => 7, +- :str => 'unmanaged' +- }, + } + + def initialize(status=:unknown) +@@ -536,11 +532,8 @@ module ClusterEntity + def get_status + running = 0 + failed = 0 +- unmanaged = 0 + @crm_status.each do |s| +- if !s.managed +- 
unmanaged += 1 +- elsif s.active ++ if s.active + running += 1 + elsif s.failed + failed += 1 +@@ -549,8 +542,6 @@ module ClusterEntity + + if disabled? + status = ClusterEntity::ResourceStatus.new(:disabled) +- elsif unmanaged >0 +- status = ClusterEntity::ResourceStatus.new(:unmanaged) + elsif running > 0 + status = ClusterEntity::ResourceStatus.new(:running) + elsif failed > 0 or @error_list.length > 0 +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index f176c39..c650fe6 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -851,7 +851,9 @@ Pcs.ResourceObj = Ember.Object.extend({ + }.property("class_type"), + res_type: Ember.computed.alias('resource_type'), + status_icon: function() { +- var icon_class = get_status_icon_class(this.get("status_val")); ++ var icon_class = get_status_icon_class( ++ this.get("status_val"), this.get("is_unmanaged") ++ ); + return "<div style=\"float:left;margin-right:6px;height:16px;\" class=\"" + icon_class + " sprites\"></div>"; + }.property("status_val"), + status_val: function() { +@@ -867,19 +869,23 @@ Pcs.ResourceObj = Ember.Object.extend({ + } + }.property('status', 'error_list.@each.message', 'warning_list.@each.message'), + status_color: function() { +- return get_status_color(this.get("status_val")); ++ return get_status_color(this.get("status_val"), this.get("is_unmanaged")); + }.property("status_val"), + status_style: function() { +- var color = get_status_color(this.get("status_val")); ++ var color = get_status_color( ++ this.get("status_val"), this.get("is_unmanaged") ++ ); + return "color: " + color + ((color != "green")? "; font-weight: bold;" : ""); + }.property("status_val"), + show_status: function() { +- return '<span style="' + this.get('status_style') + '">' + this.get('status') + '</span>'; ++ return '<span style="' + this.get('status_style') + '">' ++ + this.get('status') + (this.get("is_unmanaged") ? 
" (unmanaged)" : "") ++ + '</span>'; + }.property("status_style", "disabled"), + status_class: function() { + if ( + this.get("status_val") == get_status_value("ok") || +- ["disabled", "unmanaged"].indexOf(this.get("status")) != -1 ++ this.get("status") == "disabled" + ) { + return ( + Pcs.clusterController.get("show_all_resources") ? "" : "hidden " +@@ -1003,6 +1009,17 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({ + instance_status: [], + operations: [], + utilization: [], ++ is_unmanaged: function() { ++ var instance_status_list = this.get("instance_status"); ++ if (!instance_status_list) { ++ return false; ++ } ++ var is_managed = true; ++ $.each(instance_status_list, function(_, instance_status) { ++ is_managed = is_managed && instance_status.get("managed"); ++ }); ++ return !is_managed; ++ }.property("instance_status.@each.managed"), + resource_type: function() { + var agent = this.get("agentname"); + if (agent) { +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 1060bd3..67a0bdb 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1977,7 +1977,8 @@ function status_comparator(a,b) { + return valA - valB; + } + +-function get_status_icon_class(status_val) { ++function get_status_icon_class(status_val, is_unmanaged) { ++ var is_unmanaged = typeof is_unmanaged !== 'undefined' ? is_unmanaged : false; + switch (status_val) { + case get_status_value("error"): + return "error"; +@@ -1985,15 +1986,16 @@ function get_status_icon_class(status_val) { + case get_status_value("warning"): + return "warning"; + case get_status_value("ok"): +- return "check"; ++ return is_unmanaged ? "warning" : "check"; + default: + return "x"; + } + } + +-function get_status_color(status_val) { ++function get_status_color(status_val, is_unmanaged) { ++ var is_unmanaged = typeof is_unmanaged !== 'undefined' ? is_unmanaged : false; + if (status_val == get_status_value("ok")) { +- return "green"; ++ return is_unmanaged? 
"orange" : "green"; + } + else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown") || status_val == get_status_value('disabled')) { + return "orange"; +diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb +index 885b327..39ab41f 100644 +--- a/pcsd/views/manage.erb ++++ b/pcsd/views/manage.erb +@@ -113,13 +113,18 @@ + <td> + <table class="datatable"> + <tr> +- <th style="width: 150px;">RESOURCE</th> +- <th style="width: 100px;">STATUS</th> ++ <th style="width: 170px;">RESOURCE</th> ++ <th style="width: 150px;">STATUS</th> + </tr> + {{#each r in Pcs.clusterController.cur_cluster.resource_list}} + <tr {{bind-attr title=r.tooltip}} {{bind-attr class=r.status_class}}> + <td><a {{bind-attr href=r.url_link}}>{{r.id}}</a></td> +- <td {{bind-attr style=r.status_style}}>{{{r.status_icon}}}{{r.status}}</td> ++ <td {{bind-attr style=r.status_style}}> ++ {{{r.status_icon}}}{{r.status}} ++ {{#if r.is_unmanaged}} ++ (unmanaged) ++ {{/if}} ++ </td> + </tr> + {{else}} + <tr> +@@ -144,8 +149,8 @@ + <td> + <table class="datatable"> + <tr> +- <th style="width: 150px;">FENCE-DEVICE</th> +- <th style="width: 100px;">STATUS</th> ++ <th style="width: 170px;">FENCE-DEVICE</th> ++ <th style="width: 150px;">STATUS</th> + </tr> + {{#each f in Pcs.clusterController.cur_cluster.fence_list}} + <tr {{bind-attr title=f.tooltip}} {{bind-attr class=f.status_class_fence}}> +-- +1.8.3.1 + diff --git a/SOURCES/bz1265425-01-Fix-for-crm_node-l-output-change.patch b/SOURCES/bz1265425-01-Fix-for-crm_node-l-output-change.patch deleted file mode 100644 index 9b3a454..0000000 --- a/SOURCES/bz1265425-01-Fix-for-crm_node-l-output-change.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 2d28901bb2eac1329e935b0d7f8418a27c0b0067 Mon Sep 17 00:00:00 2001 -From: Chris Feist <cfeist@redhat.com> -Date: Tue, 22 Sep 2015 17:19:37 -0500 -Subject: [PATCH] Fix for crm_node -l output change - -- crm_node -l now outputs a status after the node id and node name we - now ignore lines where 
the 3rd field is "lost". ---- - pcs/utils.py | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/pcs/utils.py b/pcs/utils.py -index 0b8d03f..88362b3 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -1706,8 +1706,9 @@ def getPacemakerNodesID(allow_failure=False): - - pm_nodes = {} - for line in output.rstrip().split("\n"): -- node_info = line.rstrip().split(" ",1) -- pm_nodes[node_info[0]] = node_info[1] -+ node_info = line.rstrip().split(" ") -+ if len(node_info) <= 2 or node_info[2] != "lost": -+ pm_nodes[node_info[0]] = node_info[1] - - return pm_nodes - --- -1.9.1 - diff --git a/SOURCES/bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch b/SOURCES/bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch deleted file mode 100644 index 72a7b03..0000000 --- a/SOURCES/bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch +++ /dev/null @@ -1,33 +0,0 @@ -From fedadee0788fc4841bd4b2df03cabd35c57d0f2d Mon Sep 17 00:00:00 2001 -From: Chris Feist <cfeist@redhat.com> -Date: Tue, 6 Oct 2015 15:54:25 -0500 -Subject: [PATCH] Fixed issue with 'resource manage' not removing meta - attribute from clones or masters - ---- - pcs/resource.py | 8 +++- - pcs/test/test_resource.py | 108 ++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 114 insertions(+), 2 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index 4c4b8ee..e50e20b 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -2349,11 +2349,15 @@ def resource_manage(argv, set_managed): - if retval != 0: - utils.err("error attempting to unmanage resource: %s" % output) - else: -- xpath = "(//primitive|//group)[@id='"+resource+"']/meta_attributes/nvpair[@name='is-managed']" -+ # Remove the meta attribute from the id specified -+ xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']/meta_attributes/nvpair[@name='is-managed']" -+ utils.run(["cibadmin", "-D", "--xpath", xpath]) -+ # Remove the meta attribute from the 
parent of the id specified, if the parent is a clone or master -+ xpath = "(//master|//clone)[primitive[contains(@id, '"+resource+"')]]/meta_attributes/nvpair[@name='is-managed']" - utils.run(["cibadmin", "-D", "--xpath", xpath]) - if isGroup: - for res in res_to_manage: -- xpath = "(//primitive|//group)[@id='"+res+"']/meta_attributes/nvpair[@name='is-managed']" -+ xpath = "(//primitive|//group|//clone|//master)[@id='"+res+"']/meta_attributes/nvpair[@name='is-managed']" - utils.run(["cibadmin", "-D", "--xpath", xpath]) - - def is_managed(resource_id): diff --git a/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch b/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch deleted file mode 100644 index 9577232..0000000 --- a/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch +++ /dev/null @@ -1,26 +0,0 @@ -From ecef0b8d8be8f20225351b5af4448104937b2fea Mon Sep 17 00:00:00 2001 -From: Chris Feist <cfeist@redhat.com> -Date: Wed, 7 Oct 2015 17:54:13 -0500 -Subject: [PATCH] Remove all is-managed meta attributes when managing a - resource - ---- - pcs/resource.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index 7f2d4c3..c1c5f50 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -2351,7 +2351,7 @@ def resource_manage(argv, set_managed): - else: - # Remove the meta attribute from the id specified (and all children) - xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']//meta_attributes/nvpair[@name='is-managed']" -- utils.run(["cibadmin", "-D", "--xpath", xpath]) -+ utils.run(["cibadmin", "-d", "--xpath", xpath, "--force"]) - # Remove the meta attribute from the parent of the id specified, if the parent is a clone or master - xpath = "(//master|//clone)[(group|primitive)[@id='"+resource+"']]/meta_attributes/nvpair[@name='is-managed']" - utils.run(["cibadmin", "-D", "--xpath", xpath]) --- -2.4.3 - diff --git 
a/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch b/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch deleted file mode 100644 index ccb6efd..0000000 --- a/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 82f9be59e55c5e5fa01242fb7da59e3610da3674 Mon Sep 17 00:00:00 2001 -From: Chris Feist <cfeist@redhat.com> -Date: Wed, 7 Oct 2015 09:11:43 -0500 -Subject: [PATCH] Fixes for managing special cases of unmanaged resources - ---- - pcs/resource.py | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index e50e20b..7f2d4c3 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -2349,11 +2349,11 @@ def resource_manage(argv, set_managed): - if retval != 0: - utils.err("error attempting to unmanage resource: %s" % output) - else: -- # Remove the meta attribute from the id specified -- xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']/meta_attributes/nvpair[@name='is-managed']" -+ # Remove the meta attribute from the id specified (and all children) -+ xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']//meta_attributes/nvpair[@name='is-managed']" - utils.run(["cibadmin", "-D", "--xpath", xpath]) - # Remove the meta attribute from the parent of the id specified, if the parent is a clone or master -- xpath = "(//master|//clone)[primitive[contains(@id, '"+resource+"')]]/meta_attributes/nvpair[@name='is-managed']" -+ xpath = "(//master|//clone)[(group|primitive)[@id='"+resource+"']]/meta_attributes/nvpair[@name='is-managed']" - utils.run(["cibadmin", "-D", "--xpath", xpath]) - if isGroup: - for res in res_to_manage: --- -2.4.3 - diff --git a/SOURCES/bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch b/SOURCES/bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch new file mode 100644 index 0000000..c583575 --- /dev/null +++ 
b/SOURCES/bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch @@ -0,0 +1,87 @@ +From 8696f5e4f072ac88a3e20b1b376ea8de823f7aa7 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Fri, 8 Jul 2016 12:20:59 +0200 +Subject: [PATCH] fix displaying cluster config when cib is provided as a file + +--- + pcs/config.py | 11 +++++++++-- + pcs/status.py | 8 +++++++- + pcs/utils.py | 13 +++++++------ + 3 files changed, 23 insertions(+), 9 deletions(-) + +diff --git a/pcs/config.py b/pcs/config.py +index 4659c5b..3d86b39 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -94,7 +94,14 @@ def config_show(argv): + status.nodes_status(["config"]) + print() + config_show_cib() +- cluster.cluster_uidgid([], True) ++ if ( ++ utils.is_rhel6() ++ or ++ (not utils.usefile and "--corosync_conf" not in utils.pcs_options) ++ ): ++ # with corosync 1 and cman, uid gid is part of cluster.conf file ++ # with corosync 2, uid gid is in a separate directory ++ cluster.cluster_uidgid([], True) + if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6(): + print() + print("Quorum:") +@@ -113,8 +120,8 @@ def config_show_cib(): + print("Stonith Devices:") + resource.resource_show([], True) + print("Fencing Levels:") +- print() + stonith.stonith_level_show() ++ print() + + lib = utils.get_library_wrapper() + constraint.location_show([]) +diff --git a/pcs/status.py b/pcs/status.py +index 0e5e0e7..e1f367f 100644 +--- a/pcs/status.py ++++ b/pcs/status.py +@@ -66,7 +66,13 @@ def full_status(): + if utils.stonithCheck(): + print("WARNING: no stonith devices and stonith-enabled is not false") + +- if not utils.is_rhel6() and utils.corosyncPacemakerNodeCheck(): ++ if ( ++ not utils.usefile ++ and ++ not utils.is_rhel6() ++ and ++ utils.corosyncPacemakerNodeCheck() ++ ): + print("WARNING: corosync and pacemaker node names do not match (IPs used in setup?)") + + print(output) +diff --git a/pcs/utils.py b/pcs/utils.py +index 171fbdd..01db081 100644 +--- 
a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1808,12 +1808,13 @@ def stonithCheck(): + if p.attrib["class"] == "stonith": + return False + +- # check if SBD daemon is running +- try: +- if is_service_running(cmd_runner(), "sbd"): +- return False +- except LibraryError: +- pass ++ if not usefile: ++ # check if SBD daemon is running ++ try: ++ if is_service_running(cmd_runner(), "sbd"): ++ return False ++ except LibraryError: ++ pass + + return True + +-- +1.8.3.1 + diff --git a/SOURCES/bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch b/SOURCES/bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch deleted file mode 100644 index a2e4508..0000000 --- a/SOURCES/bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch +++ /dev/null @@ -1,172 +0,0 @@ -From 35bb4addbc04e8a8dea26aa2099d852ce084ec14 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek <tojeline@redhat.com> -Date: Wed, 21 Oct 2015 10:46:17 +0200 -Subject: [PATCH] fix setting cluster properties in web UI - -- set the properties correctly even if it is not possible to load the - properties' current values -- do not depend on cluster being imported in pcsd when loading the - properties -- fix loading default values of cluster properties ---- - pcsd/pcsd.rb | 39 +++++++++++++++----------------- - pcsd/remote.rb | 11 ++++++--- - pcsd/settings.rb | 1 + - pcsd/settings.rb.i386-linux-gnu.debian | 1 + - pcsd/settings.rb.x86_64-linux-gnu.debian | 1 + - 5 files changed, 29 insertions(+), 24 deletions(-) - -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index b7c2a49..c42abb8 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -573,7 +573,7 @@ if not DISABLE_GUI - end - @resource_agents = get_resource_agents_avail(session) - @stonith_agents = get_stonith_agents_avail(session) -- @config_options = getConfigOptions2(session, @cluster_name) -+ @config_options = getConfigOptions2(session, @nodes) - - erb :nodes, :layout => :main - end -@@ -895,7 +895,7 @@ def getLocationDeps(session, cur_node) - [deps_allow, deps_disallow] 
- end - --def getConfigOptions2(session, cluster_name) -+def getConfigOptions2(session, cluster_nodes) - config_options = {} - general_page = [] - # general_page << ConfigOption.new("Cluster Delay Time", "cluster-delay", "int", 4, "Seconds") -@@ -933,7 +933,7 @@ If checked, the cluster will refuse to start resources unless one or more STONIT - allconfigoptions = [] - config_options.each { |i,k| k.each { |j| allconfigoptions << j } } - ConfigOption.getDefaultValues(allconfigoptions) -- ConfigOption.loadValues(session, allconfigoptions, cluster_name) -+ ConfigOption.loadValues(session, allconfigoptions, cluster_nodes) - return config_options - end - -@@ -1005,16 +1005,8 @@ class ConfigOption - @desc = desc - end - -- def self.loadValues(session, cos, cluster_name, node_list=nil) -- if node_list -- code, output = send_nodes_request_with_token( -- session, node_list, "get_cib" -- ) -- else -- code, output = send_cluster_request_with_token( -- session, cluster_name, "get_cib" -- ) -- end -+ def self.loadValues(session, cos, node_list) -+ code, output = send_nodes_request_with_token(session, node_list, "get_cib") - $logger.info(code) - if code != 200 - $logger.info "Error: unable to load cib" -@@ -1037,14 +1029,19 @@ class ConfigOption - end - - def self.getDefaultValues(cos) -- metadata = `#{PENGINE} metadata` -- doc = REXML::Document.new(metadata) -- -- cos.each { |co| -- doc.elements.each("resource-agent/parameters/parameter[@name='#{co.configname}']/content") { |e| -- co.default = e.attributes["default"] -- break -- } -+ [PENGINE, CIB_BINARY].each { |command| -+ metadata = `#{command} metadata` -+ begin -+ doc = REXML::Document.new(metadata) -+ cos.each { |co| -+ doc.elements.each("resource-agent/parameters/parameter[@name='#{co.configname}']/content") { |e| -+ co.default = e.attributes["default"] -+ break -+ } -+ } -+ rescue -+ $logger.error("Failed to parse #{command} metadata") -+ end - } - end - -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 
e65c8ac..dc90fc9 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -1921,9 +1921,13 @@ def update_cluster_settings(params, request, session) - binary_settings = [] - changed_settings = [] - old_settings = {} -- getConfigOptions2(PCSAuth.getSuperuserSession(), $cluster_name).values().flatten().each { |opt| -+ getConfigOptions2( -+ PCSAuth.getSuperuserSession(), get_nodes().flatten() -+ ).values().flatten().each { |opt| -+ binary_settings << opt.configname if "check" == opt.type -+ # if we don't know current value of an option, consider it changed -+ next if opt.value.nil? - if "check" == opt.type -- binary_settings << opt.configname - old_settings[opt.configname] = is_cib_true(opt.value) - else - old_settings[opt.configname] = opt.value -@@ -1931,6 +1935,7 @@ def update_cluster_settings(params, request, session) - } - settings.each { |key, val| - new_val = binary_settings.include?(key) ? is_cib_true(val) : val -+ # if we don't know current value of an option, consider it changed - if (not old_settings.key?(key)) or (old_settings[key] != new_val) - changed_settings << key.downcase() - end -@@ -1940,7 +1945,7 @@ def update_cluster_settings(params, request, session) - return 403, 'Permission denied' - end - end -- if changed_settings.count { |x| x != 'enable-acl'} > 0 -+ if changed_settings.count { |x| x != 'enable-acl' } > 0 - if not allowed_for_local_cluster(session, Permissions::WRITE) - return 403, 'Permission denied' - end -diff --git a/pcsd/settings.rb b/pcsd/settings.rb -index 4cea800..ff056a4 100644 ---- a/pcsd/settings.rb -+++ b/pcsd/settings.rb -@@ -10,6 +10,7 @@ HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" - PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" - NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/' - PENGINE = "/usr/libexec/pacemaker/pengine" -+CIB_BINARY = '/usr/libexec/pacemaker/cib' - CRM_MON = "/usr/sbin/crm_mon" - CRM_NODE = "/usr/sbin/crm_node" - CRM_ATTRIBUTE = "/usr/sbin/crm_attribute" -diff 
--git a/pcsd/settings.rb.i386-linux-gnu.debian b/pcsd/settings.rb.i386-linux-gnu.debian -index 6366651..4db23e4 100644 ---- a/pcsd/settings.rb.i386-linux-gnu.debian -+++ b/pcsd/settings.rb.i386-linux-gnu.debian -@@ -7,6 +7,7 @@ OCF_ROOT = "/usr/lib/ocf" - HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" - PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" - PENGINE = "/usr/lib/i386-linux-gnu/pacemaker/pengine" -+CIB_BINARY = '/usr/lib/i386-linux-gnu/pacemaker/cib' - CRM_NODE = "/usr/sbin/crm_node" - CRM_ATTRIBUTE = "/usr/sbin/crm_attribute" - COROSYNC_BINARIES = "/usr/sbin/" -diff --git a/pcsd/settings.rb.x86_64-linux-gnu.debian b/pcsd/settings.rb.x86_64-linux-gnu.debian -index 23a71ab..3f6d5c0 100644 ---- a/pcsd/settings.rb.x86_64-linux-gnu.debian -+++ b/pcsd/settings.rb.x86_64-linux-gnu.debian -@@ -7,6 +7,7 @@ OCF_ROOT = "/usr/lib/ocf" - HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" - PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" - PENGINE = "/usr/lib/x86_64-linux-gnu/pacemaker/pengine" -+CIB_BINARY = '/usr/lib/x86_64-linux-gnu/pacemaker/cib' - CRM_NODE = "/usr/sbin/crm_node" - CRM_ATTRIBUTE = "/usr/sbin/crm_attribute" - COROSYNC_BINARIES = "/usr/sbin/" --- -1.9.1 - diff --git a/SOURCES/bz1281364-01-gui-add-constraint-colocation-set-support.patch b/SOURCES/bz1281364-01-gui-add-constraint-colocation-set-support.patch new file mode 100644 index 0000000..cf9130e --- /dev/null +++ b/SOURCES/bz1281364-01-gui-add-constraint-colocation-set-support.patch @@ -0,0 +1,190 @@ +From aeb87c63c2f37bdc241b2c9add7cf0e9be9d7789 Mon Sep 17 00:00:00 2001 +From: Marek Grac <mgrac@redhat.com> +Date: Thu, 7 Jul 2016 14:05:14 +0200 +Subject: [PATCH] gui: add constraint colocation set support + +--- + pcsd/pcs.rb | 19 ++++++++++++++++++- + pcsd/public/js/nodes-ember.js | 9 +++++++++ + pcsd/public/js/pcsd.js | 1 + + pcsd/remote.rb | 5 +++++ + pcsd/views/main.erb | 44 +++++++++++++++++++++++++++++++++++++++++++ + 5 files changed, 77 
insertions(+), 1 deletion(-) + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 7c25e10..57082be 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -131,6 +131,19 @@ def add_order_set_constraint( + return retval, stderr.join(' ') + end + ++def add_colocation_set_constraint( ++ auth_user, resource_set_list, force=false, autocorrect=true ++) ++ command = [PCS, "constraint", "colocation"] ++ resource_set_list.each { |resource_set| ++ command << "set" ++ command.concat(resource_set) ++ } ++ command << '--force' if force ++ command << '--autocorrect' if autocorrect ++ stdout, stderr, retval = run_cmd(auth_user, *command) ++ return retval, stderr.join(' ') ++end + + def add_ticket_constraint( + auth_user, ticket, resource_id, role, loss_policy, +@@ -1681,7 +1694,11 @@ def get_node_status(auth_user, cib_dom) + :node_attr => node_attrs_to_v2(get_node_attributes(auth_user, cib_dom)), + :nodes_utilization => get_nodes_utilization(cib_dom), + :known_nodes => [], +- :available_features => ['sbd', 'ticket_constraints'] ++ :available_features => [ ++ 'constraint_colocation_set', ++ 'sbd', ++ 'ticket_constraints', ++ ] + } + + nodes = get_nodes_status() +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index bf1bb92..cb62806 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -47,6 +47,11 @@ Pcs = Ember.Application.createWithMixins({ + this.get("available_features").indexOf("ticket_constraints") != -1 + ); + }.property("available_features"), ++ is_supported_constraint_colocation_set: function() { ++ return ( ++ this.get("available_features").indexOf("constraint_colocation_set") != -1 ++ ); ++ }.property("available_features"), + is_sbd_running: false, + is_sbd_enabled: false, + is_sbd_enabled_or_running: function() { +@@ -767,6 +772,7 @@ Pcs.ResourceObj = Ember.Object.extend({ + ordering_constraints: [], + ordering_set_constraints: [], + colocation_constraints: [], ++ colocation_set_constraints: [], + + get_map: function() { 
+ var self = this; +@@ -2381,6 +2387,7 @@ function constraint_resort(constraints){ + ordering_constraints: {}, + ordering_set_constraints: {}, + colocation_constraints: {}, ++ colocation_set_constraints: {}, + }; + } + +@@ -2391,6 +2398,7 @@ function constraint_resort(constraints){ + + var colocations = constraint_resort_part(constraints.rsc_colocation, { + plain: constraint_colocation_create_resource_keyed_map, ++ with_sets: constraint_set_create_resource_keyed_map, + }); + + var locations = constraint_resort_part(constraints.rsc_location, { +@@ -2409,5 +2417,6 @@ function constraint_resort(constraints){ + ticket_constraints: tickets.plain, + ticket_set_constraints: tickets.with_sets, + colocation_constraints: colocations.plain, ++ colocation_set_constraints: colocations.with_sets, + }; + } +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 41c481e..6c88888 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -2046,6 +2046,7 @@ function auto_show_hide_constraints() { + "ordering_constraints", + "ordering_set_constraints", + "colocation_constraints", ++ "colocation_set_constraints", + "ticket_constraints", + "ticket_set_constraints", + "meta_attributes", +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index b1e00fa..75c9465 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1821,6 +1821,11 @@ def add_constraint_set_remote(params, request, auth_user) + auth_user, + params["resources"].values, params["force"], !params['disable_autocorrect'] + ) ++ when "col" ++ retval, error = add_colocation_set_constraint( ++ auth_user, ++ params["resources"].values, params["force"], !params['disable_autocorrect'] ++ ) + when "ticket" + unless params["options"]["ticket"] + return [400, "Error adding constraint ticket: option ticket missing"] +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 5461515..52c1900 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -291,6 +291,9 @@ + {{ordering_constraints-table 
constraints=resource.ordering_constraints resource_id=resource._id}} + {{ordering_set_constraints-table constraints=resource.ordering_set_constraints}} + {{colocation_constraints-table constraints=resource.colocation_constraints}} ++ {{#if Pcs.is_supported_constraint_colocation_set}} ++ {{colocation_set_constraints-table constraints=resource.colocation_set_constraints}} ++ {{/if}} + {{#if Pcs.is_ticket_constraints_supported}} + {{ticket_constraints-table constraints=resource.ticket_constraints resource_id=resource._id}} + {{ticket_set_constraints-table constraints=resource.ticket_set_constraints}} +@@ -696,6 +699,47 @@ Use the 'Add' button to submit the form."> + </table> + </script> + ++ <script type="text/x-handlebars" data-template-name="components/colocation_set_constraints-table"> ++ <table style="clear:left;float:left;"> ++ <tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="colocation_set_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Colocation Set Preferences ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})</td></tr> ++ <tr><td> ++ <div id="locationdep"> ++ <table class="datatable"> ++ <tr> ++ <th>Preference Name/Set of Resources</th> ++ <th style="text-align: center;">Remove</th> ++ </tr> ++ {{#each cons in constraints}} ++ <tr> ++ <td>{{cons.id}}</td> ++ <td {{bind-attr constraint_id="cons.id"}} style="text-align:center;"> ++ <a onclick="return remove_constraint_action(this);" href="#" class="remove">X</a> ++ </td> ++ </tr> ++ {{#each set in cons.sets}} ++ <tr> ++ <td style="padding-left:2em;">Set:{{#each rsc in set.resources}} {{rsc}}{{/each}}</td> ++ <td></td> ++ </tr> ++ {{/each}} ++ {{else}} ++ <tr><td style="color: gray;">NONE</td><td></td></tr> ++ {{/each}} ++ <tr id="new_res_col_set" title="Enter the resources you want to be in one set into the 'Set' field separated by space. 
++Use the 'New Set' button to create more sets. ++Use the 'Add' button to submit the form."> ++ <td>Set: <input type="text" name="resource_ids[]"></td> ++ <td style="vertical-align: bottom;"> ++ <button type="button" onclick="new_constraint_set_row('#new_res_col_set');" name="new-row">New Set</button> ++ <button type="button" onclick="add_constraint_set('#new_res_col_set', 'col', false);" name="add">Add</button> ++ </td> ++ </tr> ++ </table> ++ </div> ++ </td></tr> ++ </table> ++ </script> ++ + <script type="text/x-handlebars" data-template-name="components/meta_attributes-table"> + <table style="clear:left;float:left"> + <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="meta_attributes"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Meta Attributes ({{#if resource.meta_attr.length}}{{resource.meta_attr.length}}{{else}}0{{/if}})</td></tr> +-- +1.8.3.1 + diff --git a/SOURCES/bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch b/SOURCES/bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch new file mode 100644 index 0000000..8613126 --- /dev/null +++ b/SOURCES/bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch @@ -0,0 +1,600 @@ +From 0a96fde9b1d691268948091442c2f0075e81ab95 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Thu, 28 Jul 2016 15:21:18 +0200 +Subject: [PATCH] web UI: add possibility to change order of resources in group + +--- + pcsd/pcs.rb | 1 + + pcsd/public/css/style.css | 10 +++ + pcsd/public/js/nodes-ember.js | 167 ++++++++++++++++++++++++++++++++++++++---- + pcsd/public/js/pcsd.js | 117 +++++++++++++++++------------ + pcsd/remote.rb | 47 ++++++++---- + pcsd/views/_dialogs.erb | 21 ++++++ + pcsd/views/_resource.erb | 6 -- + pcsd/views/main.erb | 51 +++++++++++-- + 8 files changed, 334 insertions(+), 86 deletions(-) + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb 
+index ad54a75..1eb9e9e 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1702,6 +1702,7 @@ def get_node_status(auth_user, cib_dom) + 'constraint_colocation_set', + 'sbd', + 'ticket_constraints', ++ 'moving_resource_in_group', + ] + } + +diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css +index d41b164..0d744d5 100644 +--- a/pcsd/public/css/style.css ++++ b/pcsd/public/css/style.css +@@ -848,3 +848,13 @@ table.args-table td.reg { + .constraint-ticket-add-attribute { + vertical-align: top; + } ++ ++.cursor-move { ++ cursor: move; ++} ++ ++.sortable-table td { ++ height: 1.5em; ++ line-height: 1.2em; ++ background: black; ++} +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index efc0192..3d4fe79 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -52,6 +52,11 @@ Pcs = Ember.Application.createWithMixins({ + this.get("available_features").indexOf("constraint_colocation_set") != -1 + ); + }.property("available_features"), ++ is_supported_moving_resource_in_group: function() { ++ return ( ++ this.get("available_features").indexOf("moving_resource_in_group") != -1 ++ ); ++ }.property("available_features"), + is_sbd_running: false, + is_sbd_enabled: false, + is_sbd_enabled_or_running: function() { +@@ -245,6 +250,154 @@ Pcs = Ember.Application.createWithMixins({ + } + }); + ++Pcs.GroupSelectorComponent = Ember.Component.extend({ ++ resource_id: null, ++ resource: function() { ++ var id = this.get("resource_id"); ++ if (id) { ++ var resource = Pcs.resourcesContainer.get_resource_by_id(id); ++ if (resource) { ++ return resource; ++ } ++ } ++ return null; ++ }.property("resource_id"), ++ resource_change: function() { ++ this._refresh_fn(); ++ this._update_resource_select_content(); ++ this._update_resource_select_value(); ++ }.observes("resource", "resource_id"), ++ group_list: [], ++ group_select_content: function() { ++ var list = []; ++ $.each(this.get("group_list"), function(_, group) { ++ 
list.push({ ++ name: group, ++ value: group ++ }); ++ }); ++ return list; ++ }.property("group_list"), ++ group_select_value: null, ++ group: function() { ++ var id = this.get("group_select_value"); ++ if (id) { ++ var group = Pcs.resourcesContainer.get_resource_by_id(id); ++ if (group) { ++ return group; ++ } ++ } ++ return null; ++ }.property("group_select_value"), ++ position_select_content: [ ++ { ++ name: "before", ++ value: "before" ++ }, ++ { ++ name: "after", ++ value: "after" ++ } ++ ], ++ position_select_value: null, ++ position_select_value_changed: function() { ++ }.observes("position_select_value"), ++ resource_select_content: [], ++ resource_select_value: null, ++ group_select_value_changed: function () { ++ this._update_resource_select_content(); ++ this._update_resource_select_value(); ++ }.observes("group_select_value"), ++ actions: { ++ refresh: function() { ++ this.set("group_list", Pcs.resourcesContainer.get("group_list")); ++ this._refresh_fn(); ++ this._update_resource_select_content(); ++ this._update_resource_select_value(); ++ } ++ }, ++ _refresh_fn: function() { ++ var id = this.get("resource_id"); ++ if (id) { ++ var resource = Pcs.resourcesContainer.get_resource_by_id(id); ++ if (resource) { ++ var parent = resource.get("parent"); ++ if (parent && parent.get("is_group")) { ++ this.set("group_select_value", parent.get("id")); ++ return; ++ } ++ } ++ } ++ this.set("group_select_value", null); ++ }, ++ _update_resource_select_content: function() { ++ var self = this; ++ var group = self.get("group"); ++ if (!group) { ++ self.set("resource_select_content", []); ++ return; ++ } ++ var list = []; ++ var resource_id; ++ $.each(group.get("members"), function(_, resource) { ++ resource_id = resource.get("id"); ++ if (resource_id != self.get("resource_id")) { ++ list.push({ ++ name: resource_id, ++ value: resource_id ++ }); ++ } ++ }); ++ self.set("resource_select_content", list); ++ }, ++ _update_resource_select_value: function() { ++ var self = 
this; ++ var group = self.get("group"); ++ var resource = self.get("resource"); ++ if (!group) { ++ self.set("resource_select_value", null); ++ return; ++ } ++ var resource_list = group.get("members"); ++ if ( ++ !resource || ++ !resource.get("parent") || ++ resource.get("parent").get("id") != group.get("id") ++ ) { ++ self.set("position_select_value", "after"); ++ self.set("resource_select_value", resource_list.slice(-1)[0].get("id")); ++ } else { ++ var index = resource_list.findIndex(function(item) { ++ return item.get("id") == resource.get("id"); ++ }); ++ if (index == 0) { ++ self.set("position_select_value", "before"); ++ self.set( ++ "resource_select_value", ++ (resource_list[1]) ? resource_list[1].get("id") : null // second ++ ); ++ } else if (index == -1) { ++ self.set("position_select_value", "after"); ++ self.set("resource_select_value", resource_list.slice(-1)[0].get("id")); ++ } else { ++ self.set("position_select_value", "after"); ++ self.set("resource_select_value", resource_list[index-1].get("id")); ++ } ++ } ++ }, ++ group_input_name: "group_id", ++ classNames: "group-selector", ++ init: function() { ++ this._super(); ++ if (this.get("resource_id")) { ++ this.set("group_list", Pcs.resourcesContainer.get("group_list")); ++ } ++ this._refresh_fn(); ++ this._update_resource_select_content(); ++ this._update_resource_select_value(); ++ } ++}); ++ + Pcs.ValueSelectorComponent = Ember.Component.extend({ + tagName: 'select', + attributeBindings: ['name'], +@@ -682,20 +835,6 @@ Pcs.ResourceObj = Ember.Object.extend({ + } + return null; + }.property('parent'), +- group_selector: function() { +- var self = this; +- var cur_group = self.get('get_group_id'); +- var html = '<select>\n<option value="">None</option>\n'; +- $.each(self.get('group_list'), function(_, group) { +- html += '<option value="' + group + '"'; +- if (cur_group === group) { +- html += 'selected'; +- } +- html += '>' + group + '</option>\n'; +- }); +- html += '</select><input type="button" 
value="Change group" onclick="resource_change_group(curResource(), $(this).prev().prop(\'value\'));">'; +- return html; +- }.property('group_list', 'get_group_id'), + status: "unknown", + class_type: null, // property to determine type of the resource + resource_type: function() { // this property is just for displaying resource type in GUI +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index a646bed..82187ef 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -96,50 +96,77 @@ function select_menu(menu, item, initial) { + } + + function create_group() { +- var num_nodes = 0; +- var node_names = ""; +- $("#resource_list :checked").parent().parent().each(function (index,element) { +- if (element.getAttribute("nodeID")) { +- num_nodes++; +- node_names += element.getAttribute("nodeID") + " " +- } +- }); +- +- if (num_nodes == 0) { ++ var resource_list = get_checked_ids_from_nodelist("resource_list"); ++ if (resource_list.length == 0) { + alert("You must select at least one resource to add to a group"); + return; + } +- +- $("#resources_to_add_to_group").val(node_names); ++ var not_primitives = resource_list.filter(function(resource_id) { ++ return !Pcs.resourcesContainer.get_resource_by_id(resource_id).get( ++ "is_primitive" ++ ); ++ }); ++ if (not_primitives.length != 0) { ++ alert("Members of group have to be primitive resources. 
These resources" + ++ " are not primitives: " + not_primitives.join(", ")); ++ return; ++ } ++ var order_el = $("#new_group_resource_list tbody"); ++ order_el.empty(); ++ order_el.append(resource_list.map(function (item) { ++ return `<tr value="${item}" class="cursor-move"><td>${item}</td></tr>`; ++ })); ++ var order_obj = order_el.sortable(); ++ order_el.disableSelection(); + $("#add_group").dialog({ + title: 'Create Group', ++ width: 'auto', + modal: true, + resizable: false, +- buttons: { +- Cancel: function() { +- $(this).dialog("close"); ++ buttons: [ ++ { ++ text: "Cancel", ++ click: function() { ++ $(this).dialog("close"); ++ } + }, +- "Create Group": function() { +- var data = $('#add_group > form').serialize(); +- var url = get_cluster_remote_url() + "add_group"; +- ajax_wrapper({ +- type: "POST", +- url: url, +- data: data, +- success: function() { +- Pcs.update(); +- $("#add_group").dialog("close"); +- }, +- error: function (xhr, status, error) { +- alert( +- "Error creating group " +- + ajax_simple_error(xhr, status, error) +- ); +- $("#add_group").dialog("close"); +- } +- }); ++ { ++ text: "Create Group", ++ id: "add_group_submit_btn", ++ click: function() { ++ var dialog_obj = $(this); ++ var submit_btn_obj = dialog_obj.parent().find( ++ "#add_group_submit_btn" ++ ); ++ submit_btn_obj.button("option", "disabled", true); ++ ++ ajax_wrapper({ ++ type: "POST", ++ url: get_cluster_remote_url() + "add_group", ++ data: { ++ resource_group: $( ++ '#add_group:visible input[name=resource_group]' ++ ).val(), ++ resources: order_obj.sortable( ++ "toArray", {attribute: "value"} ++ ).join(" ") ++ }, ++ success: function() { ++ submit_btn_obj.button("option", "disabled", false); ++ Pcs.update(); ++ dialog_obj.dialog("close"); ++ }, ++ error: function (xhr, status, error) { ++ alert( ++ "Error creating group " ++ + ajax_simple_error(xhr, status, error) ++ ); ++ submit_btn_obj.button("option", "disabled", false); ++ } ++ }); ++ } + } +- } ++ ] + }); + } + +@@ 
-2257,24 +2284,24 @@ function resource_ungroup(group_id) { + }); + } + +-function resource_change_group(resource_id, group_id) { ++function resource_change_group(resource_id, form) { + if (resource_id == null) { + return; + } + show_loading_screen(); + var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id); + var data = { +- resource_id: resource_id, +- group_id: group_id ++ resource_id: resource_id + }; ++ $.each($(form).serializeArray(), function(_, item) { ++ data[item.name] = item.value; ++ }); + +- if (resource_obj.get('parent')) { +- if (resource_obj.get('parent').get('id') == group_id) { +- return; +- } +- if (resource_obj.get('parent').get('class_type') == 'group') { +- data['old_group_id'] = resource_obj.get('parent').get('id'); +- } ++ if ( ++ resource_obj.get('parent') && ++ resource_obj.get('parent').get('class_type') == 'group' ++ ) { ++ data['old_group_id'] = resource_obj.get('parent').get('id'); + } + + ajax_wrapper({ +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 05a6d03..4844adf 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1415,21 +1415,23 @@ def update_resource (params, request, auth_user) + + param_line = getParamList(params) + if not params[:resource_id] +- out, stderr, retval = run_cmd( +- auth_user, +- PCS, "resource", "create", params[:name], params[:resource_type], +- *param_line +- ) +- if retval != 0 +- return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out}) +- end ++ cmd = [PCS, "resource", "create", params[:name], params[:resource_type]] ++ cmd += param_line + if params[:resource_group] and params[:resource_group] != "" +- run_cmd( +- auth_user, +- PCS, "resource","group", "add", params[:resource_group], params[:name] ++ cmd += ['--group', params[:resource_group]] ++ if ( ++ ['before', 'after'].include?(params[:in_group_position]) and ++ params[:in_group_reference_resource_id] + ) ++ cmd << "--#{params[:in_group_position]}" ++ cmd << params[:in_group_reference_resource_id] ++ end 
+ resource_group = params[:resource_group] + end ++ out, stderr, retval = run_cmd(auth_user, *cmd) ++ if retval != 0 ++ return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out}) ++ end + + if params[:resource_clone] and params[:resource_clone] != "" + name = resource_group ? resource_group : params[:name] +@@ -1461,10 +1463,18 @@ def update_resource (params, request, auth_user) + ) + end + else +- run_cmd( +- auth_user, PCS, "resource", "group", "add", params[:resource_group], ++ cmd = [ ++ PCS, "resource", "group", "add", params[:resource_group], + params[:resource_id] ++ ] ++ if ( ++ ['before', 'after'].include?(params[:in_group_position]) and ++ params[:in_group_reference_resource_id] + ) ++ cmd << "--#{params[:in_group_position]}" ++ cmd << params[:in_group_reference_resource_id] ++ end ++ run_cmd(auth_user, *cmd) + end + end + +@@ -2098,10 +2108,17 @@ def resource_change_group(params, request, auth_user) + end + return 200 + end +- _, stderr, retval = run_cmd( +- auth_user, ++ cmd = [ + PCS, 'resource', 'group', 'add', params[:group_id], params[:resource_id] ++ ] ++ if ( ++ ['before', 'after'].include?(params[:in_group_position]) and ++ params[:in_group_reference_resource_id] + ) ++ cmd << "--#{params[:in_group_position]}" ++ cmd << params[:in_group_reference_resource_id] ++ end ++ _, stderr, retval = run_cmd(auth_user, *cmd) + if retval != 0 + return [400, "Unable to add resource '#{params[:resource_id]}' to " + + "group '#{params[:group_id]}': #{stderr.join('')}" +diff --git a/pcsd/views/_dialogs.erb b/pcsd/views/_dialogs.erb +index 46e7fdb..d18ac71 100644 +--- a/pcsd/views/_dialogs.erb ++++ b/pcsd/views/_dialogs.erb +@@ -215,3 +215,24 @@ + </table> + {{/if}} + </div> ++ ++<div id="add_group" style="display: none;"> ++ <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add"> ++ <table> ++ <tr> ++ <td>Group 
Name:</td> ++ <td> ++ <input name="resource_group" type="text" /> ++ </td> ++ </tr> ++ <tr> ++ <td style="vertical-align: top;">Change order of resources:</td> ++ <td> ++ <table id="new_group_resource_list" class="sortable-table"> ++ <tbody></tbody> ++ </table> ++ </td> ++ </tr> ++ </table> ++ </form> ++</div> +diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb +index a337160..ad2251c 100644 +--- a/pcsd/views/_resource.erb ++++ b/pcsd/views/_resource.erb +@@ -116,10 +116,4 @@ + table_id_suffix="_new" + }} + </div> +- <div id="add_group" style="display: none;"> +- <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add"> +- <p style="font-size:12px;">Group Name:</p><input name="resource_group" type=text> +- <input id="resources_to_add_to_group" type=hidden name="resources" value=""> +- </form> +- </div> + <% end %> +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 52c1900..1b21f92 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -237,7 +237,7 @@ + <tr> + <td class="bold" nowrap>Group:</td> + <td id="cur_res_loc" class="reg"> +- {{{resource.group_selector}}} ++ {{group-selector resource_id=resource._id}} + </td> + </tr> + {{else}} +@@ -245,7 +245,7 @@ + <tr> + <td class="bold" nowrap>Group:</td> + <td id="cur_res_loc" class="reg"> +- {{{resource.group_selector}}} ++ {{group-selector resource_id=resource._id}} + </td> + </tr> + {{/if}} +@@ -909,10 +909,9 @@ Use the 'Add' button to submit the form."> + </div> + </td> + <td> +- {{value-selector +- prompt="None" +- content=groups +- name="resource_group" ++ {{group-selector ++ group_list=Pcs.resourcesContainer.group_list ++ group_input_name="resource_group" + }} + </td> + </tr> +@@ -1095,6 +1094,46 @@ Use the 'Add' button to submit the form."> + </td> + </script> + ++ <script type="text/x-handlebars" data-template-name="components/group-selector"> ++ 
{{value-selector ++ name=group_input_name ++ content=group_select_content ++ value=group_select_value ++ prompt="None" ++ }} ++ {{#if Pcs.is_supported_moving_resource_in_group}} ++ {{#if group_select_value}} ++ {{#if resource_select_content}} ++ {{value-selector ++ name="in_group_position" ++ content=position_select_content ++ value=position_select_value ++ prompt="" ++ }} ++ {{value-selector ++ name="in_group_reference_resource_id" ++ content=resource_select_content ++ value=resource_select_value ++ prompt="" ++ }} ++ {{/if}} ++ {{/if}} ++ {{/if}} ++ {{#if resource_id}} ++ <br/> ++ <button ++ onclick=" ++ resource_change_group(curResource(), $(this).parent().find('select')); ++ return false; ++ " ++ > ++ Update group ++ </button> ++ <button {{action refresh}}>Refresh</button> ++ {{/if}} ++ ++ </script> ++ + <script type="text/x-handlebars"> + <div id="wrapper"> + +-- +1.8.3.1 + diff --git a/SOURCES/bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch b/SOURCES/bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch new file mode 100644 index 0000000..df08815 --- /dev/null +++ b/SOURCES/bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch @@ -0,0 +1,48 @@ +From d0731ed0ccbcb24e2bc080dba6ba05a8eb0eecc4 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Tue, 13 Sep 2016 09:02:22 +0200 +Subject: [PATCH] web UI: reset selected group when displaying new resource + dialog + +--- + pcsd/public/js/pcsd.js | 13 +++++++++++++ + pcsd/views/_resource.erb | 2 +- + 2 files changed, 14 insertions(+), 1 deletion(-) + +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 67a0bdb..371b76b 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -3046,3 +3046,16 @@ function manage_resource(resource_id) { + } + }); + } ++ ++function show_add_resource_dialog() { ++ var new_resource_group_selector_id = $( ++ "#new_resource_agent .group-selector" ++ 
).attr("id"); ++ Ember.View.views[new_resource_group_selector_id].set( ++ "group_select_value", null ++ ); ++ $('#new_resource_agent').dialog({ ++ title: 'Add Resource', ++ modal:true, width: 'auto' ++ }); ++} +diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb +index ad2251c..86e5567 100644 +--- a/pcsd/views/_resource.erb ++++ b/pcsd/views/_resource.erb +@@ -14,7 +14,7 @@ + Remove</a> </div> + <div class="plus sprites"></div><div class="link"> + <% if @myView == "resource" %> +- <a href="#" onclick="$('#new_resource_agent').dialog({title: 'Add Resource', modal:true, width: 'auto'});return false;"> ++ <a href="#" onclick="show_add_resource_dialog();return false;"> + <% else %> + <a href="#" onclick="$('#new_stonith_agent').dialog({title: 'Add Fence Device', modal:true, width: 'auto'});return false;"> + <% end %> +-- +1.8.3.1 + diff --git a/SOURCES/bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch b/SOURCES/bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch new file mode 100644 index 0000000..f0b86f4 --- /dev/null +++ b/SOURCES/bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch @@ -0,0 +1,554 @@ +From 5d8bab038a7aa64c38b79e5de9579af4c73e70a2 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Thu, 14 Jul 2016 17:04:04 +0200 +Subject: [PATCH] fixes for pcs cli running on a remote node + +--- + pcs/acl.py | 2 +- + pcs/cluster.py | 13 ++++++- + pcs/config.py | 37 +++++++++++++----- + pcs/constraint.py | 2 +- + pcs/prop.py | 16 +------- + pcs/quorum.py | 3 +- + pcs/status.py | 29 +++++--------- + pcs/stonith.py | 12 +++++- + pcs/utils.py | 104 +++++++++++++++++++++++++++++++------------------ + pcsd/cluster_entity.rb | 4 +- + pcsd/pcs.rb | 4 ++ + pcsd/remote.rb | 14 ++++++- + 12 files changed, 149 insertions(+), 91 deletions(-) + +diff --git a/pcs/acl.py b/pcs/acl.py +index 118ceed..0378c10 100644 +--- a/pcs/acl.py ++++ b/pcs/acl.py +@@ -55,7 +55,7 @@ def acl_cmd(argv): + def acl_show(argv): + dom = 
utils.get_cib_dom() + +- properties = prop.get_set_properties(defaults=prop.get_default_properties()) ++ properties = utils.get_set_properties(defaults=prop.get_default_properties()) + acl_enabled = properties.get("enable-acl", "").lower() + if is_true(acl_enabled): + print("ACLs are enabled") +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 4155103..13446d4 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -1157,7 +1157,18 @@ def stop_cluster_corosync(): + utils.err("unable to stop {0}".format(service)) + + def kill_cluster(argv): +- daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"] ++ daemons = [ ++ "crmd", ++ "pengine", ++ "attrd", ++ "lrmd", ++ "stonithd", ++ "cib", ++ "pacemakerd", ++ "pacemaker_remoted", ++ "corosync-qdevice", ++ "corosync", ++ ] + dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons) + # if dummy_retval != 0: + # print "Error: unable to execute killall -9" +diff --git a/pcs/config.py b/pcs/config.py +index 3d86b39..9119c3c 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -95,14 +95,22 @@ def config_show(argv): + print() + config_show_cib() + if ( +- utils.is_rhel6() +- or +- (not utils.usefile and "--corosync_conf" not in utils.pcs_options) ++ utils.hasCorosyncConf() ++ and ++ ( ++ utils.is_rhel6() ++ or ++ (not utils.usefile and "--corosync_conf" not in utils.pcs_options) ++ ) + ): + # with corosync 1 and cman, uid gid is part of cluster.conf file + # with corosync 2, uid gid is in a separate directory + cluster.cluster_uidgid([], True) +- if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6(): ++ if ( ++ "--corosync_conf" in utils.pcs_options ++ or ++ (not utils.is_rhel6() and utils.hasCorosyncConf()) ++ ): + print() + print("Quorum:") + try: +@@ -267,7 +275,16 @@ def config_restore_remote(infile_name, infile_obj): + err_msgs.append(output) + continue + status = json.loads(output) +- if status["corosync"] or status["pacemaker"] or 
status["cman"]: ++ if ( ++ status["corosync"] ++ or ++ status["pacemaker"] ++ or ++ status["cman"] ++ or ++ # not supported by older pcsd, do not fail if not present ++ status.get("pacemaker_remote", False) ++ ): + err_msgs.append( + "Cluster is currently running on node %s. You need to stop " + "the cluster in order to restore the configuration." +@@ -286,7 +303,7 @@ def config_restore_remote(infile_name, infile_obj): + # If node returns HTTP 404 it does not support config syncing at all. + for node in node_list: + retval, output = utils.pauseConfigSyncing(node, 10 * 60) +- if not (retval == 0 or output.endswith("(HTTP error: 404)")): ++ if not (retval == 0 or "(HTTP error: 404)" in output): + utils.err(output) + + if infile_obj: +@@ -306,11 +323,13 @@ def config_restore_remote(infile_name, infile_obj): + + def config_restore_local(infile_name, infile_obj): + if ( +- status.is_cman_running() ++ status.is_service_running("cman") ++ or ++ status.is_service_running("corosync") + or +- status.is_corosyc_running() ++ status.is_service_running("pacemaker") + or +- status.is_pacemaker_running() ++ status.is_service_running("pacemaker_remote") + ): + utils.err( + "Cluster is currently running on this node. 
You need to stop " +diff --git a/pcs/constraint.py b/pcs/constraint.py +index 5d9b0df..e32f1a3 100644 +--- a/pcs/constraint.py ++++ b/pcs/constraint.py +@@ -593,7 +593,7 @@ def location_show(argv): + print(" Node: " + node) + + nodehash_label = ( +- (nodehashon, " Allowed to run:") ++ (nodehashon, " Allowed to run:"), + (nodehashoff, " Not allowed to run:") + ) + for nodehash, label in nodehash_label: +diff --git a/pcs/prop.py b/pcs/prop.py +index 3a65990..36eba60 100644 +--- a/pcs/prop.py ++++ b/pcs/prop.py +@@ -7,7 +7,6 @@ from __future__ import ( + + import sys + import json +-from xml.dom.minidom import parseString + + from pcs import usage + from pcs import utils +@@ -116,7 +115,7 @@ def list_property(argv): + properties = {} + + if "--defaults" not in utils.pcs_options: +- properties = get_set_properties( ++ properties = utils.get_set_properties( + None if print_all else argv[0], + properties + ) +@@ -141,16 +140,3 @@ def get_default_properties(): + parameters[name] = prop["default"] + return parameters + +-def get_set_properties(prop_name=None, defaults=None): +- properties = {} if defaults is None else dict(defaults) +- (output, retVal) = utils.run(["cibadmin","-Q","--scope", "crm_config"]) +- if retVal != 0: +- utils.err("unable to get crm_config\n"+output) +- dom = parseString(output) +- de = dom.documentElement +- crm_config_properties = de.getElementsByTagName("nvpair") +- for prop in crm_config_properties: +- if prop_name is None or (prop_name == prop.getAttribute("name")): +- properties[prop.getAttribute("name")] = prop.getAttribute("value") +- return properties +- +diff --git a/pcs/quorum.py b/pcs/quorum.py +index a849282..1c2d41d 100644 +--- a/pcs/quorum.py ++++ b/pcs/quorum.py +@@ -8,7 +8,6 @@ from __future__ import ( + import sys + + from pcs import ( +- prop, + stonith, + usage, + utils, +@@ -234,7 +233,7 @@ def quorum_unblock_cmd(argv): + utils.err("unable to cancel waiting for nodes") + print("Quorum unblocked") + +- startup_fencing = 
prop.get_set_properties().get("startup-fencing", "") ++ startup_fencing = utils.get_set_properties().get("startup-fencing", "") + utils.set_cib_property( + "startup-fencing", + "false" if startup_fencing.lower() != "false" else "true" +diff --git a/pcs/status.py b/pcs/status.py +index bdfcc85..86216ea 100644 +--- a/pcs/status.py ++++ b/pcs/status.py +@@ -103,7 +103,7 @@ def full_status(): + print(output) + + if not utils.usefile: +- if "--full" in utils.pcs_options: ++ if "--full" in utils.pcs_options and utils.hasCorosyncConf(): + print_pcsd_daemon_status() + print() + utils.serviceStatus(" ") +@@ -121,7 +121,10 @@ def nodes_status(argv): + return + + if len(argv) == 1 and (argv[0] == "config"): +- corosync_nodes = utils.getNodesFromCorosyncConf() ++ if utils.hasCorosyncConf(): ++ corosync_nodes = utils.getNodesFromCorosyncConf() ++ else: ++ corosync_nodes = [] + try: + pacemaker_nodes = sorted([ + node.attrs.name for node +@@ -244,7 +247,7 @@ def cluster_status(argv): + else: + print("",line) + +- if not utils.usefile: ++ if not utils.usefile and utils.hasCorosyncConf(): + print() + print_pcsd_daemon_status() + +@@ -262,25 +265,11 @@ def xml_status(): + utils.err("running crm_mon, is pacemaker running?") + print(output, end="") + +-def is_cman_running(): +- if utils.is_systemctl(): +- dummy_output, retval = utils.run(["systemctl", "status", "cman.service"]) +- else: +- dummy_output, retval = utils.run(["service", "cman", "status"]) +- return retval == 0 +- +-def is_corosyc_running(): +- if utils.is_systemctl(): +- dummy_output, retval = utils.run(["systemctl", "status", "corosync.service"]) +- else: +- dummy_output, retval = utils.run(["service", "corosync", "status"]) +- return retval == 0 +- +-def is_pacemaker_running(): ++def is_service_running(service): + if utils.is_systemctl(): +- dummy_output, retval = utils.run(["systemctl", "status", "pacemaker.service"]) ++ dummy_output, retval = utils.run(["systemctl", "status", service]) + else: +- dummy_output, 
retval = utils.run(["service", "pacemaker", "status"]) ++ dummy_output, retval = utils.run(["service", service, "status"]) + return retval == 0 + + def print_pcsd_daemon_status(): +diff --git a/pcs/stonith.py b/pcs/stonith.py +index ab9e926..c02f35a 100644 +--- a/pcs/stonith.py ++++ b/pcs/stonith.py +@@ -225,7 +225,11 @@ def stonith_level_add(level, node, devices): + for dev in devices.split(","): + if not utils.is_stonith_resource(dev): + utils.err("%s is not a stonith id (use --force to override)" % dev) +- if not utils.is_pacemaker_node(node) and not utils.is_corosync_node(node): ++ corosync_nodes = [] ++ if utils.hasCorosyncConf(): ++ corosync_nodes = utils.getNodesFromCorosyncConf() ++ pacemaker_nodes = utils.getNodesFromPacemaker() ++ if node not in corosync_nodes and node not in pacemaker_nodes: + utils.err("%s is not currently a node (use --force to override)" % node) + + ft = dom.getElementsByTagName("fencing-topology") +@@ -321,6 +325,10 @@ def stonith_level_clear(node = None): + + def stonith_level_verify(): + dom = utils.get_cib_dom() ++ corosync_nodes = [] ++ if utils.hasCorosyncConf(): ++ corosync_nodes = utils.getNodesFromCorosyncConf() ++ pacemaker_nodes = utils.getNodesFromPacemaker() + + fls = dom.getElementsByTagName("fencing-level") + for fl in fls: +@@ -329,7 +337,7 @@ def stonith_level_verify(): + for dev in devices.split(","): + if not utils.is_stonith_resource(dev): + utils.err("%s is not a stonith id" % dev) +- if not utils.is_corosync_node(node) and not utils.is_pacemaker_node(node): ++ if node not in corosync_nodes and node not in pacemaker_nodes: + utils.err("%s is not currently a node" % node) + + def stonith_level_show(): +diff --git a/pcs/utils.py b/pcs/utils.py +index 2cfb693..3970eff 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -301,6 +301,8 @@ def canAddNodeToCluster(node): + return (False, "unable to authenticate to node") + if "node_available" in myout and myout["node_available"] == True: + return (True, "") ++ elif 
myout.get("pacemaker_remote", False): ++ return (False, "node is running pacemaker_remote") + else: + return (False, "node is already in a cluster") + except ValueError: +@@ -465,6 +467,14 @@ def getNodesFromPacemaker(): + except LibraryError as e: + process_library_reports(e.args) + ++def hasCorosyncConf(conf=None): ++ if not conf: ++ if is_rhel6(): ++ conf = settings.cluster_conf_file ++ else: ++ conf = settings.corosync_conf_file ++ return os.path.isfile(conf) ++ + def getCorosyncConf(conf=None): + if not conf: + if is_rhel6(): +@@ -1071,18 +1081,6 @@ def does_exist(xpath_query): + return False + return True + +-def is_pacemaker_node(node): +- p_nodes = getNodesFromPacemaker() +- if node in p_nodes: +- return True +- return False +- +-def is_corosync_node(node): +- c_nodes = getNodesFromCorosyncConf() +- if node in c_nodes: +- return True +- return False +- + def get_group_children(group_id): + child_resources = [] + dom = get_cib_dom() +@@ -1838,7 +1836,7 @@ def getCorosyncNodesID(allow_failure=False): + err_msgs, retval, output, dummy_std_err = call_local_pcsd( + ['status', 'nodes', 'corosync-id'], True + ) +- if err_msgs: ++ if err_msgs and not allow_failure: + for msg in err_msgs: + err(msg, False) + sys.exit(1) +@@ -1866,6 +1864,7 @@ def getCorosyncNodesID(allow_failure=False): + + # Warning, if a node has never started the hostname may be '(null)' + #TODO This doesn't work on CMAN clusters at all and should be removed completely ++# Doesn't work on pacemaker-remote nodes either + def getPacemakerNodesID(allow_failure=False): + if os.getuid() == 0: + (output, retval) = run(['crm_node', '-l']) +@@ -1873,7 +1872,7 @@ def getPacemakerNodesID(allow_failure=False): + err_msgs, retval, output, dummy_std_err = call_local_pcsd( + ['status', 'nodes', 'pacemaker-id'], True + ) +- if err_msgs: ++ if err_msgs and not allow_failure: + for msg in err_msgs: + err(msg, False) + sys.exit(1) +@@ -1893,9 +1892,11 @@ def getPacemakerNodesID(allow_failure=False): + return 
pm_nodes + + def corosyncPacemakerNodeCheck(): +- # does not work on CMAN clusters +- pm_nodes = getPacemakerNodesID() +- cs_nodes = getCorosyncNodesID() ++ # does not work on CMAN clusters and pacemaker-remote nodes ++ # we do not want a failure to exit pcs as this is only a minor information ++ # function ++ pm_nodes = getPacemakerNodesID(allow_failure=True) ++ cs_nodes = getCorosyncNodesID(allow_failure=True) + + for node_id in pm_nodes: + if pm_nodes[node_id] == "(null)": +@@ -1920,10 +1921,9 @@ def getClusterName(): + if is_rhel6(): + try: + dom = parse(settings.cluster_conf_file) ++ return dom.documentElement.getAttribute("name") + except (IOError,xml.parsers.expat.ExpatError): +- return "" +- +- return dom.documentElement.getAttribute("name") ++ pass + else: + try: + f = open(settings.corosync_conf_file,'r') +@@ -1937,7 +1937,15 @@ def getClusterName(): + if cluster_name: + return cluster_name + except (IOError, corosync_conf_parser.CorosyncConfParserException): +- return "" ++ pass ++ ++ # there is no corosync.conf or cluster.conf on remote nodes, we can try to ++ # get cluster name from pacemaker ++ try: ++ return get_set_properties("cluster-name")["cluster-name"] ++ except: ++ # we need to catch SystemExit (from utils.err), parse errors and so on ++ pass + + return "" + +@@ -2024,23 +2032,30 @@ def serviceStatus(prefix): + if not is_systemctl(): + return + print("Daemon Status:") +- for service in ["corosync", "pacemaker", "pcsd"]: +- print('{0}{1}: {2}/{3}'.format( +- prefix, service, +- run(["systemctl", 'is-active', service])[0].strip(), +- run(["systemctl", 'is-enabled', service])[0].strip() +- )) +- try: +- sbd_running = is_service_running(cmd_runner(), "sbd") +- sbd_enabled = is_service_enabled(cmd_runner(), "sbd") +- if sbd_enabled or sbd_running: +- print("{prefix}sbd: {active}/{enabled}".format( +- prefix=prefix, +- active=("active" if sbd_running else "inactive"), +- enabled=("enabled" if sbd_enabled else "disabled") +- )) +- except 
LibraryError: +- pass ++ service_def = [ ++ # ( ++ # service name, ++ # display even if not enabled nor running ++ # ) ++ ("corosync", True), ++ ("pacemaker", True), ++ ("pacemaker_remote", False), ++ ("pcsd", True), ++ ("sbd", False), ++ ] ++ for service, display_always in service_def: ++ try: ++ running = is_service_running(cmd_runner(), service) ++ enabled = is_service_enabled(cmd_runner(), service) ++ if display_always or enabled or running: ++ print("{prefix}{service}: {active}/{enabled}".format( ++ prefix=prefix, ++ service=service, ++ active=("active" if running else "inactive"), ++ enabled=("enabled" if enabled else "disabled") ++ )) ++ except LibraryError: ++ pass + + def enableServices(): + # do NOT handle SBD in here, it is started by pacemaker not systemd or init +@@ -2677,3 +2692,16 @@ def exit_on_cmdline_input_errror(error, main_name, usage_name): + + def get_report_processor(): + return LibraryReportProcessorToConsole(debug=("--debug" in pcs_options)) ++ ++def get_set_properties(prop_name=None, defaults=None): ++ properties = {} if defaults is None else dict(defaults) ++ (output, retVal) = run(["cibadmin","-Q","--scope", "crm_config"]) ++ if retVal != 0: ++ err("unable to get crm_config\n"+output) ++ dom = parseString(output) ++ de = dom.documentElement ++ crm_config_properties = de.getElementsByTagName("nvpair") ++ for prop in crm_config_properties: ++ if prop_name is None or (prop_name == prop.getAttribute("name")): ++ properties[prop.getAttribute("name")] = prop.getAttribute("value") ++ return properties +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index f54cd30..fa56fe2 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -1011,7 +1011,9 @@ module ClusterEntity + @uptime = 'unknown' + @name = nil + @services = {} +- [:pacemaker, :corosync, :pcsd, :cman, :sbd].each do |service| ++ [ ++ :pacemaker, :pacemaker_remote, :corosync, :pcsd, :cman, :sbd ++ ].each do |service| + @services[service] = { + :installed => 
nil, + :running => nil, +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 57082be..0956de9 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -892,6 +892,10 @@ def pacemaker_running?() + is_service_running?('pacemaker') + end + ++def pacemaker_remote_running?() ++ is_service_running?('pacemaker_remote') ++end ++ + def get_pacemaker_version() + begin + stdout, stderror, retval = run_cmd( +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 75c9465..6a3a692 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -769,9 +769,19 @@ def get_sw_versions(params, request, auth_user) + end + + def remote_node_available(params, request, auth_user) +- if (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or File.exist?("/var/lib/pacemaker/cib/cib.xml") ++ if ( ++ (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or ++ (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or ++ File.exist?("/var/lib/pacemaker/cib/cib.xml") ++ ) + return JSON.generate({:node_available => false}) + end ++ if pacemaker_remote_running?() ++ return JSON.generate({ ++ :node_available => false, ++ :pacemaker_remote => true, ++ }) ++ end + return JSON.generate({:node_available => true}) + end + +@@ -1038,6 +1048,8 @@ def node_status(params, request, auth_user) + :cman => node.cman, + :corosync_enabled => node.corosync_enabled, + :pacemaker_enabled => node.pacemaker_enabled, ++ :pacemaker_remote => node.services[:pacemaker_remote][:running], ++ :pacemaker_remote_enabled => node.services[:pacemaker_remote][:enabled], + :pcsd_enabled => node.pcsd_enabled, + :corosync_online => status[:corosync_online], + :corosync_offline => status[:corosync_offline], +-- +1.8.3.1 + diff --git a/SOURCES/bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch b/SOURCES/bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch new file mode 100644 index 0000000..43c4dd3 --- /dev/null +++ 
b/SOURCES/bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch @@ -0,0 +1,221 @@ +From 0cfbd1bd87d4484eca054d41aea1d8ac9b55e93c Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Mon, 8 Aug 2016 13:32:07 +0200 +Subject: [PATCH] add possibility to hide inactive resources in "pcs resource + show" + +--- + .pylintrc | 2 +- + pcs/pcs.8 | 12 ++++++------ + pcs/resource.py | 33 +++++++++++++++++++++++++++------ + pcs/test/test_resource.py | 21 ++++++++++++++------- + pcs/usage.py | 16 ++++++++++------ + 5 files changed, 58 insertions(+), 26 deletions(-) + +diff --git a/.pylintrc b/.pylintrc +index e378e6a..1dd6d5d 100644 +--- a/.pylintrc ++++ b/.pylintrc +@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy + + [FORMAT] + # Maximum number of lines in a module +-max-module-lines=4577 ++max-module-lines=4584 + # Maximum number of characters on a single line. + max-line-length=1291 + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 52497a0..9064054 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -64,8 +64,8 @@ alert + Manage pacemaker alerts. + .SS "resource" + .TP +-[show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR] +-Show all currently configured resources or if a resource is specified show the options for the configured resource. If \fB\-\-full\fR is specified all configured resource options will be displayed. If \fB\-\-groups\fR is specified, only show groups (and their resources). ++[show [<resource id>] | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR] ++Show all currently configured resources or if a resource is specified show the options for the configured resource. If \fB\-\-full\fR is specified, all configured resource options will be displayed. If \fB\-\-groups\fR is specified, only show groups (and their resources). If \fB\-\-hide\-inactive\fR is specified, only show active resources. 
+ .TP + list [<standard|provider|type>] [\fB\-\-nodesc\fR] + Show list of all available resources, optionally filtered by specified type, standard or provider. If \fB\-\-nodesc\fR is used then descriptions of resources are not printed. +@@ -627,11 +627,11 @@ stop + Stop booth arbitrator service. + .SS "status" + .TP +-[status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR] +-View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide-inactive\fR hides inactive resources). ++[status] [\fB\-\-full\fR | \fB\-\-hide\-inactive\fR] ++View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources). + .TP +-resources +-View current status of cluster resources. ++resources [<resource id> | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR] ++Show all currently configured resources or if a resource is specified show the options for the configured resource. If \fB\-\-full\fR is specified, all configured resource options will be displayed. If \fB\-\-groups\fR is specified, only show groups (and their resources). If \fB\-\-hide\-inactive\fR is specified, only show active resources. + .TP + groups + View currently configured groups and their resources. 
+diff --git a/pcs/resource.py b/pcs/resource.py +index 66c743c..74adac6 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -1993,6 +1993,17 @@ def resource_group_list(argv): + print(" ".join(line_parts)) + + def resource_show(argv, stonith=False): ++ mutually_exclusive_opts = ("--full", "--groups", "--hide-inactive") ++ modifiers = [ ++ key for key in utils.pcs_options if key in mutually_exclusive_opts ++ ] ++ if (len(modifiers) > 1) or (argv and modifiers): ++ utils.err( ++ "you can specify only one of resource id, {0}".format( ++ ", ".join(mutually_exclusive_opts) ++ ) ++ ) ++ + if "--groups" in utils.pcs_options: + resource_group_list(argv) + return +@@ -2009,15 +2020,28 @@ def resource_show(argv, stonith=False): + return + + if len(argv) == 0: +- output, retval = utils.run(["crm_mon", "-1", "-r"]) ++ monitor_command = ["crm_mon", "--one-shot"] ++ if "--hide-inactive" not in utils.pcs_options: ++ monitor_command.append('--inactive') ++ output, retval = utils.run(monitor_command) + if retval != 0: + utils.err("unable to get cluster status from crm_mon\n"+output.rstrip()) + preg = re.compile(r'.*(stonith:.*)') + resources_header = False + in_resources = False + has_resources = False ++ no_resources_line = ( ++ "NO stonith devices configured" if stonith ++ else "NO resources configured" ++ ) + for line in output.split('\n'): +- if line == "Full list of resources:": ++ if line == "No active resources": ++ print(line) ++ return ++ if line == "No resources": ++ print(no_resources_line) ++ return ++ if line in ("Full list of resources:", "Active resources:"): + resources_header = True + continue + if line == "": +@@ -2026,10 +2050,7 @@ def resource_show(argv, stonith=False): + in_resources = True + elif in_resources: + if not has_resources: +- if not stonith: +- print("NO resources configured") +- else: +- print("NO stonith devices configured") ++ print(no_resources_line) + return + continue + if in_resources: +diff --git a/pcs/test/test_resource.py 
b/pcs/test/test_resource.py +index 2fa5088..614b895 100644 +--- a/pcs/test/test_resource.py ++++ b/pcs/test/test_resource.py +@@ -213,8 +213,7 @@ the health of a system via IPMI. + ClusterIP7\t(ocf::heartbeat:IPaddr2):\tStopped (disabled) + """) + +- output, returnVal = pcs(temp_cib, "resource show ClusterIP6 --full") +- assert returnVal == 0 ++ output, returnVal = pcs(temp_cib, "resource show --full") + ac(output, """\ + Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2) + Attributes: ip=192.168.0.99 cidr_netmask=32 +@@ -241,6 +240,7 @@ the health of a system via IPMI. + Meta Attrs: target-role=Stopped + Operations: monitor interval=30s (ClusterIP7-monitor-interval-30s) + """) ++ self.assertEqual(0, returnVal) + + output, returnVal = pcs( + temp_cib, +@@ -785,7 +785,7 @@ monitor interval=60s (state-monitor-interval-60s) + assert returnVal == 0 + assert output == "" + +- line = 'resource show ClusterIP --full' ++ line = 'resource show ClusterIP' + output, returnVal = pcs(temp_cib, line) + ac(output, """\ + Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2) +@@ -3463,16 +3463,23 @@ Error: Cannot remove more than one resource from cloned group + ac(o,"") + assert r == 0 + +- o,r = pcs(temp_cib, "resource show D1 --full") +- ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Meta Attrs: target-role=Stopped \n Operations: monitor interval=60s (D1-monitor-interval-60s)\n") ++ o,r = pcs(temp_cib, "resource show D1") ++ ac(o, """\ ++ Resource: D1 (class=ocf provider=heartbeat type=Dummy) ++ Meta Attrs: target-role=Stopped ++ Operations: monitor interval=60s (D1-monitor-interval-60s) ++""") + assert r == 0 + + o,r = pcs(temp_cib, "resource enable D1") + ac(o,"") + assert r == 0 + +- o,r = pcs(temp_cib, "resource show D1 --full") +- ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n Operations: monitor interval=60s (D1-monitor-interval-60s)\n") ++ o,r = pcs(temp_cib, "resource show D1") ++ ac(o, """\ ++ Resource: D1 
(class=ocf provider=heartbeat type=Dummy) ++ Operations: monitor interval=60s (D1-monitor-interval-60s) ++""") + assert r == 0 + + # bad resource name +diff --git a/pcs/usage.py b/pcs/usage.py +index baa70d0..b11a5fa 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -189,12 +189,12 @@ Usage: pcs resource [commands]... + Manage pacemaker resources + + Commands: +- [show [resource id]] [--full] [--groups] ++ [show [<resource id>] | --full | --groups | --hide-inactive] + Show all currently configured resources or if a resource is specified +- show the options for the configured resource. If --full is specified ++ show the options for the configured resource. If --full is specified, + all configured resource options will be displayed. If --groups is +- specified, only show groups (and their resources). +- ++ specified, only show groups (and their resources). If --hide-inactive ++ is specified, only show active resources. + + list [<standard|provider|type>] [--nodesc] + Show list of all available resources, optionally filtered by specified +@@ -1108,8 +1108,12 @@ Commands: + View all information about the cluster and resources (--full provides + more details, --hide-inactive hides inactive resources). + +- resources +- View current status of cluster resources. ++ resources [<resource id> | --full | --groups | --hide-inactive] ++ Show all currently configured resources or if a resource is specified ++ show the options for the configured resource. If --full is specified, ++ all configured resource options will be displayed. If --groups is ++ specified, only show groups (and their resources). If --hide-inactive ++ is specified, only show active resources. + + groups + View currently configured groups and their resources. 
+-- +1.8.3.1 + diff --git a/SOURCES/bz1301993-01-improve-node-properties-commands.patch b/SOURCES/bz1301993-01-improve-node-properties-commands.patch new file mode 100644 index 0000000..5f812e7 --- /dev/null +++ b/SOURCES/bz1301993-01-improve-node-properties-commands.patch @@ -0,0 +1,570 @@ +From b221d83628cf1413abfa5d836c103a94184b3c46 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Fri, 22 Jul 2016 12:06:24 +0200 +Subject: [PATCH] improve node properties commands + +* added "pcs node attribute" command +* allow to list value of specified attribute / utilization from all nodes +--- + pcs/node.py | 64 +++++++++--- + pcs/pcs.8 | 7 +- + pcs/prop.py | 13 ++- + pcs/test/test_node.py | 278 ++++++++++++++++++++++++++++++++++++++++++++++++-- + pcs/usage.py | 21 ++-- + pcs/utils.py | 13 ++- + 6 files changed, 355 insertions(+), 41 deletions(-) + +diff --git a/pcs/node.py b/pcs/node.py +index ac154d4..be2fb13 100644 +--- a/pcs/node.py ++++ b/pcs/node.py +@@ -12,6 +12,8 @@ from pcs import ( + usage, + utils, + ) ++from pcs.cli.common.errors import CmdLineInputError ++from pcs.cli.common.parse_args import prepare_options + from pcs.lib.errors import LibraryError + import pcs.lib.pacemaker as lib_pacemaker + from pcs.lib.pacemaker_values import get_valid_timeout_seconds +@@ -33,11 +35,26 @@ def node_cmd(argv): + node_standby(argv) + elif sub_cmd == "unstandby": + node_standby(argv, False) ++ elif sub_cmd == "attribute": ++ if "--name" in utils.pcs_options and len(argv) > 1: ++ usage.node("attribute") ++ sys.exit(1) ++ filter_attr=utils.pcs_options.get("--name", None) ++ if len(argv) == 0: ++ attribute_show_cmd(filter_attr=filter_attr) ++ elif len(argv) == 1: ++ attribute_show_cmd(argv.pop(0), filter_attr=filter_attr) ++ else: ++ attribute_set_cmd(argv.pop(0), argv) + elif sub_cmd == "utilization": ++ if "--name" in utils.pcs_options and len(argv) > 1: ++ usage.node("utilization") ++ sys.exit(1) ++ filter_name=utils.pcs_options.get("--name", None) + 
if len(argv) == 0: +- print_nodes_utilization() ++ print_node_utilization(filter_name=filter_name) + elif len(argv) == 1: +- print_node_utilization(argv.pop(0)) ++ print_node_utilization(argv.pop(0), filter_name=filter_name) + else: + set_node_utilization(argv.pop(0), argv) + # pcs-to-pcsd use only +@@ -135,23 +152,16 @@ def set_node_utilization(node, argv): + ) + utils.replace_cib_configuration(cib) + +-def print_node_utilization(node): +- cib = utils.get_cib_dom() +- node_el = utils.dom_get_node(cib, node) +- if node_el is None: +- utils.err("Unable to find a node: {0}".format(node)) +- utilization = utils.get_utilization_str(node_el) +- +- print("Node Utilization:") +- print(" {0}: {1}".format(node, utilization)) +- +-def print_nodes_utilization(): ++def print_node_utilization(filter_node=None, filter_name=None): + cib = utils.get_cib_dom() + utilization = {} + for node_el in cib.getElementsByTagName("node"): +- u = utils.get_utilization_str(node_el) ++ node = node_el.getAttribute("uname") ++ if filter_node is not None and node != filter_node: ++ continue ++ u = utils.get_utilization_str(node_el, filter_name) + if u: +- utilization[node_el.getAttribute("uname")] = u ++ utilization[node] = u + print("Node Utilization:") + for node in sorted(utilization): + print(" {0}: {1}".format(node, utilization[node])) +@@ -163,3 +173,27 @@ def node_pacemaker_status(): + )) + except LibraryError as e: + utils.process_library_reports(e.args) ++ ++def attribute_show_cmd(filter_node=None, filter_attr=None): ++ node_attributes = utils.get_node_attributes( ++ filter_node=filter_node, ++ filter_attr=filter_attr ++ ) ++ print("Node Attributes:") ++ attribute_print(node_attributes) ++ ++def attribute_set_cmd(node, argv): ++ try: ++ attrs = prepare_options(argv) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "node", "attribute") ++ for name, value in attrs.items(): ++ utils.set_node_attribute(name, value, node) ++ ++def attribute_print(node_attributes): ++ 
for node in sorted(node_attributes.keys()): ++ line_parts = [" " + node + ":"] ++ for name, value in sorted(node_attributes[node].items()): ++ line_parts.append("{0}={1}".format(name, value)) ++ print(" ".join(line_parts)) ++ +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 16c9331..f789df7 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -644,6 +644,9 @@ clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR] + Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa. After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth'). Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance. + .SS "node" + .TP ++attribute [[<node>] [\fB\-\-name\fR <attr>] | <node> <name>=<value> ...] ++Manage node attributes. If no parameters are specified, show attributes of all nodes. If one parameter is specified, show attributes of specified node. If \fB\-\-name\fR is specified, show specified attribute's value from all nodes. If more parameters are specified, set attributes of specified node. Attributes can be removed by setting an attribute without a value. ++.TP + maintenance [\fB\-\-all\fR] | [<node>]... + Put specified node(s) into maintenance mode, if no node or options are specified the current node will be put into maintenance mode, if \fB\-\-all\fR is specified all nodes will be put into maintenace mode. 
+ .TP +@@ -656,8 +659,8 @@ Put specified node into standby mode (the node specified will no longer be able + unstandby [\fB\-\-all\fR | <node>] [\fB\-\-wait\fR[=n]] + Remove node from standby mode (the node specified will now be able to host resources), if no node or options are specified the current node will be removed from standby mode, if \fB\-\-all\fR is specified all nodes will be removed from standby mode. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be removed from standby mode and then return 0 on success or 1 if the operation not succeeded yet. If 'n' is not specified it defaults to 60 minutes. + .TP +-utilization [<node> [<name>=<value> ...]] +-Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram= ++utilization [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...] ++Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If \fB\-\-name\fR is specified, shows specified utilization value from all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. 
Example: pcs node utilization node1 cpu=4 ram= + .SS "alert" + .TP + [config|show] +diff --git a/pcs/prop.py b/pcs/prop.py +index 92a953c..1089865 100644 +--- a/pcs/prop.py ++++ b/pcs/prop.py +@@ -8,8 +8,11 @@ from __future__ import ( + import sys + import json + +-from pcs import usage +-from pcs import utils ++from pcs import ( ++ node, ++ usage, ++ utils, ++) + + def property_cmd(argv): + if len(argv) == 0: +@@ -127,11 +130,7 @@ def list_property(argv): + ) + if node_attributes: + print("Node Attributes:") +- for node in sorted(node_attributes.keys()): +- line_parts = [" " + node + ":"] +- for name, value in sorted(node_attributes[node].items()): +- line_parts.append("{0}={1}".format(name, value)) +- print(" ".join(line_parts)) ++ node.attribute_print(node_attributes) + + def get_default_properties(): + parameters = {} +diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py +index 023148c..6f03112 100644 +--- a/pcs/test/test_node.py ++++ b/pcs/test/test_node.py +@@ -8,11 +8,17 @@ from __future__ import ( + import shutil + import unittest + ++from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( + ac, + get_test_resource as rc, + ) +-from pcs.test.tools.pcs_runner import pcs ++from pcs.test.tools.pcs_runner import ( ++ pcs, ++ PcsRunner, ++) ++ ++from pcs import utils + + empty_cib = rc("cib-empty-withnodes.xml") + temp_cib = rc("temp-cib.xml") +@@ -182,7 +188,7 @@ Cluster Properties: + output, returnVal = pcs(temp_cib, "node utilization rh7-2") + expected_out = """\ + Node Utilization: +- rh7-2: \n""" ++""" + ac(expected_out, output) + self.assertEqual(0, returnVal) + +@@ -229,14 +235,33 @@ Node Utilization: + ac(expected_out, output) + self.assertEqual(0, returnVal) + +- def test_node_utilization_set_invalid(self): +- output, returnVal = pcs(temp_cib, "node utilization rh7-0") ++ output, returnVal = pcs( ++ temp_cib, "node utilization rh7-2 test1=-20" ++ ) ++ ac("", output) ++ self.assertEqual(0, returnVal) ++ ++ output, 
returnVal = pcs(temp_cib, "node utilization --name test1") + expected_out = """\ +-Error: Unable to find a node: rh7-0 ++Node Utilization: ++ rh7-1: test1=-10 ++ rh7-2: test1=-20 + """ + ac(expected_out, output) +- self.assertEqual(1, returnVal) ++ self.assertEqual(0, returnVal) + ++ output, returnVal = pcs( ++ temp_cib, ++ "node utilization --name test1 rh7-2" ++ ) ++ expected_out = """\ ++Node Utilization: ++ rh7-2: test1=-20 ++""" ++ ac(expected_out, output) ++ self.assertEqual(0, returnVal) ++ ++ def test_node_utilization_set_invalid(self): + output, returnVal = pcs(temp_cib, "node utilization rh7-0 test=10") + expected_out = """\ + Error: Unable to find a node: rh7-0 +@@ -252,3 +277,244 @@ Error: Value of utilization attribute must be integer: 'test=int' + """ + ac(expected_out, output) + self.assertEqual(1, returnVal) ++ ++ ++class NodeAttributeTest(unittest.TestCase, AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(empty_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ def fixture_attrs(self, nodes, attrs=None): ++ attrs = dict() if attrs is None else attrs ++ xml_lines = ['<nodes>'] ++ for node_id, node_name in enumerate(nodes, 1): ++ xml_lines.extend([ ++ '<node id="{0}" uname="{1}">'.format(node_id, node_name), ++ '<instance_attributes id="nodes-{0}">'.format(node_id), ++ ]) ++ nv = '<nvpair id="nodes-{id}-{name}" name="{name}" value="{val}"/>' ++ for name, value in attrs.get(node_name, dict()).items(): ++ xml_lines.append(nv.format(id=node_id, name=name, val=value)) ++ xml_lines.extend([ ++ '</instance_attributes>', ++ '</node>' ++ ]) ++ xml_lines.append('</nodes>') ++ ++ utils.usefile = True ++ utils.filename = temp_cib ++ output, retval = utils.run([ ++ "cibadmin", "--modify", '--xml-text', "\n".join(xml_lines) ++ ]) ++ assert output == "" ++ assert retval == 0 ++ ++ def test_show_empty(self): ++ self.fixture_attrs(["rh7-1", "rh7-2"]) ++ self.assert_pcs_success( ++ "node attribute", ++ "Node Attributes:\n" ++ ) ++ ++ def 
test_show_nonempty(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute", ++ """\ ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++ rh7-2: IP=192.168.1.2 ++""" ++ ) ++ ++ def test_show_multiple_per_node(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", "alias": "node2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute", ++ """\ ++Node Attributes: ++ rh7-1: IP=192.168.1.1 alias=node1 ++ rh7-2: IP=192.168.1.2 alias=node2 ++""" ++ ) ++ ++ def test_show_one_node(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", "alias": "node2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute rh7-1", ++ """\ ++Node Attributes: ++ rh7-1: IP=192.168.1.1 alias=node1 ++""" ++ ) ++ ++ def test_show_missing_node(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", "alias": "node2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute rh7-3", ++ """\ ++Node Attributes: ++""" ++ ) ++ ++ def test_show_name(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", "alias": "node2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute --name alias", ++ """\ ++Node Attributes: ++ rh7-1: alias=node1 ++ rh7-2: alias=node2 ++""" ++ ) ++ ++ def test_show_missing_name(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", "alias": "node2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute --name missing", ++ """\ ++Node Attributes: ++""" ++ ) ++ ++ def 
test_show_node_and_name(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", "alias": "node2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute --name alias rh7-1", ++ """\ ++Node Attributes: ++ rh7-1: alias=node1 ++""" ++ ) ++ ++ def test_set_new(self): ++ self.fixture_attrs(["rh7-1", "rh7-2"]) ++ self.assert_pcs_success( ++ "node attribute rh7-1 IP=192.168.1.1" ++ ) ++ self.assert_pcs_success( ++ "node attribute", ++ """\ ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++""" ++ ) ++ self.assert_pcs_success( ++ "node attribute rh7-2 IP=192.168.1.2" ++ ) ++ self.assert_pcs_success( ++ "node attribute", ++ """\ ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++ rh7-2: IP=192.168.1.2 ++""" ++ ) ++ ++ def test_set_existing(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute rh7-2 IP=192.168.2.2" ++ ) ++ self.assert_pcs_success( ++ "node attribute", ++ """\ ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++ rh7-2: IP=192.168.2.2 ++""" ++ ) ++ ++ def test_unset(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute rh7-2 IP=" ++ ) ++ self.assert_pcs_success( ++ "node attribute", ++ """\ ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++""" ++ ) ++ ++ def test_unset_nonexisting(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_result( ++ "node attribute rh7-1 missing=", ++ "Error: attribute: 'missing' doesn't exist for node: 'rh7-1'\n", ++ returncode=2 ++ ) ++ ++ def test_unset_nonexisting_forced(self): ++ self.fixture_attrs( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": 
"192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "node attribute rh7-1 missing= --force", ++ "" ++ ) +diff --git a/pcs/usage.py b/pcs/usage.py +index 0474324..2f8f855 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1242,6 +1242,14 @@ Usage: pcs node <command> + Manage cluster nodes + + Commands: ++ attribute [[<node>] [--name <name>] | <node> <name>=<value> ...] ++ Manage node attributes. If no parameters are specified, show attributes ++ of all nodes. If one parameter is specified, show attributes ++ of specified node. If --name is specified, show specified attribute's ++ value from all nodes. If more parameters are specified, set attributes ++ of specified node. Attributes can be removed by setting an attribute ++ without a value. ++ + maintenance [--all] | [<node>]... + Put specified node(s) into maintenance mode, if no node or options are + specified the current node will be put into maintenance mode, if --all +@@ -1272,12 +1280,13 @@ Commands: + the operation not succeeded yet. If 'n' is not specified it defaults + to 60 minutes. + +- utilization [<node> [<name>=<value> ...]] +- Add specified utilization options to specified node. If node is not +- specified, shows utilization of all nodes. If utilization options are +- not specified, shows utilization of specified node. Utilization option +- should be in format name=value, value has to be integer. Options may be +- removed by setting an option without a value. ++ utilization [[<node>] [--name <name>] | <node> <name>=<value> ...] ++ Add specified utilization options to specified node. If node is not ++ specified, shows utilization of all nodes. If --name is specified, ++ shows specified utilization value from all nodes. If utilization options ++ are not specified, shows utilization of specified node. Utilization ++ option should be in format name=value, value has to be integer. Options ++ may be removed by setting an option without a value. 
+ Example: pcs node utilization node1 cpu=4 ram= + """ + if pout: +diff --git a/pcs/utils.py b/pcs/utils.py +index c7d1759..079d916 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1677,6 +1677,8 @@ def get_node_attributes(filter_node=None, filter_attr=None): + if nodename not in nas: + nas[nodename] = dict() + nas[nodename][attr_name] = nvp.getAttribute("value") ++ # Use just first element of attributes. We don't support ++ # attributes with rules just yet. + break + return nas + +@@ -2447,21 +2449,22 @@ def dom_update_meta_attr(dom_element, attributes): + meta_attributes.getAttribute("id") + "-" + ) + +-def get_utilization(element): ++def get_utilization(element, filter_name=None): + utilization = {} + for e in element.getElementsByTagName("utilization"): + for u in e.getElementsByTagName("nvpair"): + name = u.getAttribute("name") +- value = u.getAttribute("value") if u.hasAttribute("value") else "" +- utilization[name] = value ++ if filter_name is not None and name != filter_name: ++ continue ++ utilization[name] = u.getAttribute("value") + # Use just first element of utilization attributes. We don't support + # utilization with rules just yet. 
+ break + return utilization + +-def get_utilization_str(element): ++def get_utilization_str(element, filter_name=None): + output = [] +- for name, value in sorted(get_utilization(element).items()): ++ for name, value in sorted(get_utilization(element, filter_name).items()): + output.append(name + "=" + value) + return " ".join(output) + +-- +1.8.3.1 + diff --git a/SOURCES/bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch b/SOURCES/bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch new file mode 100644 index 0000000..c2066bf --- /dev/null +++ b/SOURCES/bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch @@ -0,0 +1,371 @@ +From 5921099626e3afde044027ed493bdee905db4415 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Thu, 21 Jul 2016 13:58:41 +0200 +Subject: [PATCH] fix filter by property name in "pcs property show" + +--- + pcs/prop.py | 14 +-- + pcs/test/test_properties.py | 263 ++++++++++++++++++++++++++++++++++---------- + pcs/utils.py | 13 ++- + 3 files changed, 223 insertions(+), 67 deletions(-) + +diff --git a/pcs/prop.py b/pcs/prop.py +index 36eba60..92a953c 100644 +--- a/pcs/prop.py ++++ b/pcs/prop.py +@@ -100,9 +100,7 @@ def unset_property(argv): + utils.replace_cib_configuration(cib_dom) + + def list_property(argv): +- print_all = False +- if len(argv) == 0: +- print_all = True ++ print_all = len(argv) == 0 + + if "--all" in utils.pcs_options and "--defaults" in utils.pcs_options: + utils.err("you cannot specify both --all and --defaults") +@@ -124,13 +122,15 @@ def list_property(argv): + for prop,val in sorted(properties.items()): + print(" " + prop + ": " + val) + +- node_attributes = utils.get_node_attributes() ++ node_attributes = utils.get_node_attributes( ++ filter_attr=(None if print_all else argv[0]) ++ ) + if node_attributes: + print("Node Attributes:") +- for node in sorted(node_attributes): ++ for node in sorted(node_attributes.keys()): + line_parts = [" " + node + ":"] 
+- for attr in node_attributes[node]: +- line_parts.append(attr) ++ for name, value in sorted(node_attributes[node].items()): ++ line_parts.append("{0}={1}".format(name, value)) + print(" ".join(line_parts)) + + def get_default_properties(): +diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py +index 6cdd2e5..fbaf880 100644 +--- a/pcs/test/test_properties.py ++++ b/pcs/test/test_properties.py +@@ -8,11 +8,15 @@ from __future__ import ( + import shutil + import unittest + ++from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( + ac, + get_test_resource as rc, + ) +-from pcs.test.tools.pcs_runner import pcs ++from pcs.test.tools.pcs_runner import ( ++ pcs, ++ PcsRunner, ++) + + from pcs import utils + +@@ -66,61 +70,6 @@ class PropertyTest(unittest.TestCase): + assert "stonith-enabled: false" in output + assert output.startswith('Cluster Properties:\n batch-limit') + +- def testNodeProperties(self): +- utils.usefile = True +- utils.filename = temp_cib +- o,r = utils.run(["cibadmin","-M", '--xml-text', '<nodes><node id="1" uname="rh7-1"><instance_attributes id="nodes-1"/></node><node id="2" uname="rh7-2"><instance_attributes id="nodes-2"/></node></nodes>']) +- ac(o,"") +- assert r == 0 +- +- o,r = pcs("property set --node=rh7-1 IP=192.168.1.1") +- ac(o,"") +- assert r==0 +- +- o,r = pcs("property set --node=rh7-2 IP=192.168.2.2") +- ac(o,"") +- assert r==0 +- +- o,r = pcs("property") +- ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n rh7-2: IP=192.168.2.2\n") +- assert r==0 +- +- o,r = pcs("property set --node=rh7-2 IP=") +- ac(o,"") +- assert r==0 +- +- o,r = pcs("property") +- ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n") +- assert r==0 +- +- o,r = pcs("property set --node=rh7-1 IP=192.168.1.1") +- ac(o,"") +- assert r==0 +- +- o,r = pcs("property set --node=rh7-2 IP=192.168.2.2") +- ac(o,"") +- assert r==0 +- +- o,r = pcs("property") +- ac(o,"Cluster 
Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n rh7-2: IP=192.168.2.2\n") +- assert r==0 +- +- o,r = pcs("property unset --node=rh7-1 IP") +- ac(o,"") +- assert r==0 +- +- o,r = pcs("property") +- ac(o,"Cluster Properties:\nNode Attributes:\n rh7-2: IP=192.168.2.2\n") +- assert r==0 +- +- o,r = pcs("property unset --node=rh7-1 IP") +- ac(o,"Error: attribute: 'IP' doesn't exist for node: 'rh7-1'\n") +- assert r==2 +- +- o,r = pcs("property unset --node=rh7-1 IP --force") +- ac(o,"") +- assert r==0 +- + def testBadProperties(self): + o,r = pcs(temp_cib, "property set xxxx=zzzz") + self.assertEqual(r, 1) +@@ -329,3 +278,205 @@ class PropertyTest(unittest.TestCase): + default-resource-stickiness: 0.1 + """ + ) ++ ++ ++class NodePropertyTestBase(unittest.TestCase, AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(empty_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ def fixture_nodes(self, nodes, attrs=None): ++ attrs = dict() if attrs is None else attrs ++ xml_lines = ['<nodes>'] ++ for node_id, node_name in enumerate(nodes, 1): ++ xml_lines.extend([ ++ '<node id="{0}" uname="{1}">'.format(node_id, node_name), ++ '<instance_attributes id="nodes-{0}">'.format(node_id), ++ ]) ++ nv = '<nvpair id="nodes-{id}-{name}" name="{name}" value="{val}"/>' ++ for name, value in attrs.get(node_name, dict()).items(): ++ xml_lines.append(nv.format(id=node_id, name=name, val=value)) ++ xml_lines.extend([ ++ '</instance_attributes>', ++ '</node>' ++ ]) ++ xml_lines.append('</nodes>') ++ ++ utils.usefile = True ++ utils.filename = temp_cib ++ output, retval = utils.run([ ++ "cibadmin", "--modify", '--xml-text', "\n".join(xml_lines) ++ ]) ++ assert output == "" ++ assert retval == 0 ++ ++class NodePropertyShowTest(NodePropertyTestBase): ++ def test_empty(self): ++ self.fixture_nodes(["rh7-1", "rh7-2"]) ++ self.assert_pcs_success( ++ "property", ++ "Cluster Properties:\n" ++ ) ++ ++ def test_nonempty(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ 
"rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "property", ++ """\ ++Cluster Properties: ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++ rh7-2: IP=192.168.1.2 ++""" ++ ) ++ ++ def test_multiple_per_node(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", "alias": "node2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "property", ++ """\ ++Cluster Properties: ++Node Attributes: ++ rh7-1: IP=192.168.1.1 alias=node1 ++ rh7-2: IP=192.168.1.2 alias=node2 ++""" ++ ) ++ ++ def test_name_filter_not_exists(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "property show alias", ++ """\ ++Cluster Properties: ++""" ++ ) ++ ++ def test_name_filter_exists(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", "alias": "node1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "property show alias", ++ """\ ++Cluster Properties: ++Node Attributes: ++ rh7-1: alias=node1 ++""" ++ ) ++ ++class NodePropertySetTest(NodePropertyTestBase): ++ def test_set_new(self): ++ self.fixture_nodes(["rh7-1", "rh7-2"]) ++ self.assert_pcs_success( ++ "property set --node=rh7-1 IP=192.168.1.1" ++ ) ++ self.assert_pcs_success( ++ "property", ++ """\ ++Cluster Properties: ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++""" ++ ) ++ self.assert_pcs_success( ++ "property set --node=rh7-2 IP=192.168.1.2" ++ ) ++ self.assert_pcs_success( ++ "property", ++ """\ ++Cluster Properties: ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++ rh7-2: IP=192.168.1.2 ++""" ++ ) ++ ++ def test_set_existing(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "property 
set --node=rh7-2 IP=192.168.2.2" ++ ) ++ self.assert_pcs_success( ++ "property", ++ """\ ++Cluster Properties: ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++ rh7-2: IP=192.168.2.2 ++""" ++ ) ++ ++ def test_unset(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "property set --node=rh7-2 IP=" ++ ) ++ self.assert_pcs_success( ++ "property", ++ """\ ++Cluster Properties: ++Node Attributes: ++ rh7-1: IP=192.168.1.1 ++""" ++ ) ++ ++ def test_unset_nonexisting(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_result( ++ "property unset --node=rh7-1 missing", ++ "Error: attribute: 'missing' doesn't exist for node: 'rh7-1'\n", ++ returncode=2 ++ ) ++ ++ def test_unset_nonexisting_forced(self): ++ self.fixture_nodes( ++ ["rh7-1", "rh7-2"], ++ { ++ "rh7-1": {"IP": "192.168.1.1", }, ++ "rh7-2": {"IP": "192.168.1.2", }, ++ } ++ ) ++ self.assert_pcs_success( ++ "property unset --node=rh7-1 missing --force", ++ "" ++ ) +diff --git a/pcs/utils.py b/pcs/utils.py +index 981a186..c7d1759 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1659,19 +1659,24 @@ def set_unmanaged(resource): + "is-managed", "--meta", "--parameter-value", "false"] + return run(args) + +-def get_node_attributes(): ++def get_node_attributes(filter_node=None, filter_attr=None): + node_config = get_cib_xpath("//nodes") +- nas = {} + if (node_config == ""): + err("unable to get crm_config, is pacemaker running?") + dom = parseString(node_config).documentElement ++ nas = dict() + for node in dom.getElementsByTagName("node"): + nodename = node.getAttribute("uname") ++ if filter_node is not None and nodename != filter_node: ++ continue + for attributes in node.getElementsByTagName("instance_attributes"): + for nvp in attributes.getElementsByTagName("nvpair"): ++ attr_name = 
nvp.getAttribute("name") ++ if filter_attr is not None and attr_name != filter_attr: ++ continue + if nodename not in nas: +- nas[nodename] = [] +- nas[nodename].append(nvp.getAttribute("name") + "=" + nvp.getAttribute("value")) ++ nas[nodename] = dict() ++ nas[nodename][attr_name] = nvp.getAttribute("value") + break + return nas + +-- +1.8.3.1 + diff --git a/SOURCES/bz1303136-01-fix-check-if-id-exists-in-cib.patch b/SOURCES/bz1303136-01-fix-check-if-id-exists-in-cib.patch new file mode 100644 index 0000000..bc71bfc --- /dev/null +++ b/SOURCES/bz1303136-01-fix-check-if-id-exists-in-cib.patch @@ -0,0 +1,116 @@ +From cb2347ad79fe30076fad1579d1f5ee27a1835963 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Fri, 22 Jul 2016 16:29:04 +0200 +Subject: [PATCH] fix check if id exists in cib + +--- + pcs/lib/cib/tools.py | 11 ++++++++++- + pcs/test/test_lib_cib_tools.py | 24 ++++++++++++++++++++++++ + pcs/utils.py | 30 +++++++++++++++++++++++++----- + 3 files changed, 59 insertions(+), 6 deletions(-) + +diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py +index b59d50d..f86b63b 100644 +--- a/pcs/lib/cib/tools.py ++++ b/pcs/lib/cib/tools.py +@@ -21,7 +21,16 @@ def does_id_exist(tree, check_id): + tree cib etree node + check_id id to check + """ +- return tree.find('.//*[@id="{0}"]'.format(check_id)) is not None ++ # ElementTree has getroot, Elemet has getroottree ++ root = tree.getroot() if hasattr(tree, "getroot") else tree.getroottree() ++ # do not search in /cib/status, it may contain references to previously ++ # existing and deleted resources and thus preventing creating them again ++ existing = root.xpath( ++ '(/cib/*[name()!="status"]|/*[name()!="cib"])//*[@id="{0}"]'.format( ++ check_id ++ ) ++ ) ++ return len(existing) > 0 + + def validate_id_does_not_exist(tree, id): + """ +diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py +index 1149a3f..e1f2313 100644 +--- a/pcs/test/test_lib_cib_tools.py ++++ 
b/pcs/test/test_lib_cib_tools.py +@@ -48,6 +48,30 @@ class DoesIdExistTest(CibToolsTest): + self.assertFalse(lib.does_id_exist(self.cib.tree, "myId ")) + self.assertFalse(lib.does_id_exist(self.cib.tree, "my Id")) + ++ def test_ignore_status_section(self): ++ self.cib.append_to_first_tag_name( ++ "status", ++ """\ ++<elem1 id="status-1"> ++ <elem1a id="status-1a"> ++ <elem1aa id="status-1aa"/> ++ <elem1ab id="status-1ab"/> ++ </elem1a> ++ <elem1b id="status-1b"> ++ <elem1ba id="status-1ba"/> ++ <elem1bb id="status-1bb"/> ++ </elem1b> ++</elem1> ++""" ++ ) ++ self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1")) ++ self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1a")) ++ self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1aa")) ++ self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1ab")) ++ self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1b")) ++ self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1ba")) ++ self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1bb")) ++ + class FindUniqueIdTest(CibToolsTest): + def test_already_unique(self): + self.fixture_add_primitive_with_id("myId") +diff --git a/pcs/utils.py b/pcs/utils.py +index 079d916..a7ed975 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1589,15 +1589,35 @@ def is_valid_cib_scope(scope): + # Checks to see if id exists in the xml dom passed + # DEPRECATED use lxml version available in pcs.lib.cib.tools + def does_id_exist(dom, check_id): ++ # do not search in /cib/status, it may contain references to previously ++ # existing and deleted resources and thus preventing creating them again + if is_etree(dom): +- for elem in dom.findall(str(".//*")): ++ for elem in dom.findall(str( ++ '(/cib/*[name()!="status"]|/*[name()!="cib"])/*' ++ )): + if elem.get("id") == check_id: + return True + else: +- all_elem = dom.getElementsByTagName("*") +- for elem in all_elem: +- if elem.getAttribute("id") == check_id: +- return True ++ document = ( ++ dom ++ if 
isinstance(dom, xml.dom.minidom.Document) ++ else dom.ownerDocument ++ ) ++ cib_found = False ++ for cib in dom_get_children_by_tag_name(document, "cib"): ++ cib_found = True ++ for section in cib.childNodes: ++ if section.nodeType != xml.dom.minidom.Node.ELEMENT_NODE: ++ continue ++ if section.tagName == "status": ++ continue ++ for elem in section.getElementsByTagName("*"): ++ if elem.getAttribute("id") == check_id: ++ return True ++ if not cib_found: ++ for elem in document.getElementsByTagName("*"): ++ if elem.getAttribute("id") == check_id: ++ return True + return False + + # Returns check_id if it doesn't exist in the dom, otherwise it adds an integer +-- +1.8.3.1 + diff --git a/SOURCES/bz1305049-01-pcs-does-not-support-ticket-constraints.patch b/SOURCES/bz1305049-01-pcs-does-not-support-ticket-constraints.patch new file mode 100644 index 0000000..38e1b01 --- /dev/null +++ b/SOURCES/bz1305049-01-pcs-does-not-support-ticket-constraints.patch @@ -0,0 +1,351 @@ +From be8876832da345e7e16827bd6c50e262380d6979 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Wed, 14 Sep 2016 09:04:57 +0200 +Subject: [PATCH] squash bz1305049 pcs does not support "ticket" con + +d147ba4a51d0 do not use suffix "no-role" in ticket constraints + +066cf217ec45 add constraint ticket remove command +--- + pcs/cli/common/lib_wrapper.py | 1 + + pcs/cli/constraint_ticket/command.py | 6 ++ + pcs/cli/constraint_ticket/test/test_command.py | 22 +++++++ + pcs/constraint.py | 1 + + pcs/lib/cib/constraint/ticket.py | 29 ++++++++- + pcs/lib/cib/test/test_constraint_ticket.py | 89 +++++++++++++++++++++++++- + pcs/lib/commands/constraint/ticket.py | 12 ++++ + pcs/pcs.8 | 3 + + pcs/test/test_constraints.py | 36 +++++++++++ + pcs/usage.py | 3 + + 10 files changed, 199 insertions(+), 3 deletions(-) + +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 94a1311..99bfe35 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ 
-132,6 +132,7 @@ def load_module(env, middleware_factory, name): + 'set': constraint_ticket.create_with_set, + 'show': constraint_ticket.show, + 'add': constraint_ticket.create, ++ 'remove': constraint_ticket.remove, + } + ) + +diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py +index ab70434..0ed4fdd 100644 +--- a/pcs/cli/constraint_ticket/command.py ++++ b/pcs/cli/constraint_ticket/command.py +@@ -52,6 +52,12 @@ def add(lib, argv, modificators): + duplication_alowed=modificators["force"], + ) + ++def remove(lib, argv, modificators): ++ if len(argv) != 2: ++ raise CmdLineInputError() ++ ticket, resource_id = argv ++ lib.constraint_ticket.remove(ticket, resource_id) ++ + def show(lib, argv, modificators): + """ + show all ticket constraints +diff --git a/pcs/cli/constraint_ticket/test/test_command.py b/pcs/cli/constraint_ticket/test/test_command.py +index d40d421..9ca7817 100644 +--- a/pcs/cli/constraint_ticket/test/test_command.py ++++ b/pcs/cli/constraint_ticket/test/test_command.py +@@ -65,3 +65,25 @@ class AddTest(TestCase): + resource_in_clone_alowed=True, + duplication_alowed=True, + ) ++ ++class RemoveTest(TestCase): ++ def test_refuse_args_count(self): ++ self.assertRaises(CmdLineInputError, lambda: command.remove( ++ mock.MagicMock(), ++ ["TICKET"], ++ {}, ++ )) ++ self.assertRaises(CmdLineInputError, lambda: command.remove( ++ mock.MagicMock(), ++ ["TICKET", "RESOURCE", "SOMETHING_ELSE"], ++ {}, ++ )) ++ ++ def test_call_library_remove_with_correct_attrs(self): ++ lib = mock.MagicMock( ++ constraint_ticket=mock.MagicMock(remove=mock.Mock()) ++ ) ++ command.remove(lib, ["TICKET", "RESOURCE"], {}) ++ lib.constraint_ticket.remove.assert_called_once_with( ++ "TICKET", "RESOURCE", ++ ) +diff --git a/pcs/constraint.py b/pcs/constraint.py +index e32f1a3..d8415b6 100644 +--- a/pcs/constraint.py ++++ b/pcs/constraint.py +@@ -90,6 +90,7 @@ def constraint_cmd(argv): + command_map = { + "set": ticket_command.create_with_set, + 
"add": ticket_command.add, ++ "remove": ticket_command.remove, + "show": ticket_command.show, + } + sub_command = argv[0] if argv else "show" +diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py +index 4154aac..c708794 100644 +--- a/pcs/lib/cib/constraint/ticket.py ++++ b/pcs/lib/cib/constraint/ticket.py +@@ -39,7 +39,8 @@ def _validate_options_common(options): + def _create_id(cib, ticket, resource_id, resource_role): + return tools.find_unique_id( + cib, +- "-".join(('ticket', ticket, resource_id, resource_role)) ++ "-".join(('ticket', ticket, resource_id)) ++ +("-{0}".format(resource_role) if resource_role else "") + ) + + def prepare_options_with_set(cib, options, resource_set_list): +@@ -93,7 +94,7 @@ def prepare_options_plain(cib, options, ticket, resource_id): + cib, + options["ticket"], + resource_id, +- options["rsc-role"] if "rsc-role" in options else "no-role" ++ options.get("rsc-role", "") + ), + partial(tools.check_new_id_applicable, cib, DESCRIPTION) + ) +@@ -103,6 +104,30 @@ def create_plain(constraint_section, options): + element.attrib.update(options) + return element + ++def remove_plain(constraint_section, ticket_key, resource_id): ++ ticket_element_list = constraint_section.xpath( ++ './/rsc_ticket[@ticket="{0}" and @rsc="{1}"]' ++ .format(ticket_key, resource_id) ++ ) ++ ++ for ticket_element in ticket_element_list: ++ ticket_element.getparent().remove(ticket_element) ++ ++def remove_with_resource_set(constraint_section, ticket_key, resource_id): ++ ref_element_list = constraint_section.xpath( ++ './/rsc_ticket[@ticket="{0}"]/resource_set/resource_ref[@id="{1}"]' ++ .format(ticket_key, resource_id) ++ ) ++ ++ for ref_element in ref_element_list: ++ set_element = ref_element.getparent() ++ set_element.remove(ref_element) ++ if not len(set_element): ++ ticket_element = set_element.getparent() ++ ticket_element.remove(set_element) ++ if not len(ticket_element): ++ ticket_element.getparent().remove(ticket_element) ++ + 
def are_duplicate_plain(element, other_element): + return all( + element.attrib.get(name, "") == other_element.attrib.get(name, "") +diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py +index ede748e..d3da004 100644 +--- a/pcs/lib/cib/test/test_constraint_ticket.py ++++ b/pcs/lib/cib/test/test_constraint_ticket.py +@@ -8,10 +8,15 @@ from __future__ import ( + from functools import partial + from pcs.test.tools.pcs_unittest import TestCase + ++from lxml import etree ++ + from pcs.common import report_codes + from pcs.lib.cib.constraint import ticket + from pcs.lib.errors import ReportItemSeverity as severities +-from pcs.test.tools.assertions import assert_raise_library_error ++from pcs.test.tools.assertions import ( ++ assert_raise_library_error, ++ assert_xml_equal, ++) + from pcs.test.tools.pcs_unittest import mock + + +@@ -306,3 +311,85 @@ class AreDuplicateWithResourceSet(TestCase): + Element({"ticket": "ticket_key"}), + Element({"ticket": "X"}), + )) ++ ++class RemovePlainTest(TestCase): ++ def test_remove_tickets_constraints_for_resource(self): ++ constraint_section = etree.fromstring(""" ++ <constraints> ++ <rsc_ticket id="t1" ticket="tA" rsc="rA"/> ++ <rsc_ticket id="t2" ticket="tA" rsc="rB"/> ++ <rsc_ticket id="t3" ticket="tA" rsc="rA"/> ++ <rsc_ticket id="t4" ticket="tB" rsc="rA"/> ++ <rsc_ticket id="t5" ticket="tB" rsc="rB"/> ++ </constraints> ++ """) ++ ++ ticket.remove_plain( ++ constraint_section, ++ ticket_key="tA", ++ resource_id="rA", ++ ) ++ ++ assert_xml_equal(etree.tostring(constraint_section).decode(), """ ++ <constraints> ++ <rsc_ticket id="t2" ticket="tA" rsc="rB"/> ++ <rsc_ticket id="t4" ticket="tB" rsc="rA"/> ++ <rsc_ticket id="t5" ticket="tB" rsc="rB"/> ++ </constraints> ++ """) ++ ++class RemoveWithSetTest(TestCase): ++ def test_remove_resource_references_and_empty_remaining_parents(self): ++ constraint_section = etree.fromstring(""" ++ <constraints> ++ <rsc_ticket id="t1" ticket="tA"> ++ 
<resource_set id="rs1"> ++ <resource_ref id="rA"/> ++ </resource_set> ++ <resource_set id="rs2"> ++ <resource_ref id="rA"/> ++ </resource_set> ++ </rsc_ticket> ++ ++ <rsc_ticket id="t2" ticket="tA"> ++ <resource_set id="rs3"> ++ <resource_ref id="rA"/> ++ <resource_ref id="rB"/> ++ </resource_set> ++ <resource_set id="rs4"> ++ <resource_ref id="rA"/> ++ </resource_set> ++ </rsc_ticket> ++ ++ <rsc_ticket id="t3" ticket="tB"> ++ <resource_set id="rs5"> ++ <resource_ref id="rA"/> ++ </resource_set> ++ </rsc_ticket> ++ </constraints> ++ """) ++ ++ ticket.remove_with_resource_set( ++ constraint_section, ++ ticket_key="tA", ++ resource_id="rA" ++ ) ++ ++ assert_xml_equal( ++ """ ++ <constraints> ++ <rsc_ticket id="t2" ticket="tA"> ++ <resource_set id="rs3"> ++ <resource_ref id="rB"/> ++ </resource_set> ++ </rsc_ticket> ++ ++ <rsc_ticket id="t3" ticket="tB"> ++ <resource_set id="rs5"> ++ <resource_ref id="rA"/> ++ </resource_set> ++ </rsc_ticket> ++ </constraints> ++ """, ++ etree.tostring(constraint_section).decode() ++ ) +diff --git a/pcs/lib/commands/constraint/ticket.py b/pcs/lib/commands/constraint/ticket.py +index e6960d5..2ea7afc 100644 +--- a/pcs/lib/commands/constraint/ticket.py ++++ b/pcs/lib/commands/constraint/ticket.py +@@ -68,3 +68,15 @@ def create( + ) + + env.push_cib(cib) ++ ++def remove(env, ticket_key, resource_id): ++ """ ++ remove all ticket constraint from resource ++ If resource is in resource set with another resources then only resource ref ++ is removed. If resource is alone in resource set whole constraint is removed. ++ """ ++ cib = env.get_cib() ++ constraint_section = get_constraints(cib) ++ ticket.remove_plain(constraint_section, ticket_key, resource_id) ++ ticket.remove_with_resource_set(constraint_section, ticket_key, resource_id) ++ env.push_cib(cib) +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 61abe67..40b146f 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -490,6 +490,9 @@ Create a ticket constraint for <resource id>. 
Available option is loss-policy=fe + ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]] + Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote. + .TP ++ticket remove <ticket> <resource id> ++Remove all ticket constraints with <ticket> from <resource id>. ++.TP + remove [constraint id]... + Remove constraint(s) or constraint rules with the specified id(s). + .TP +diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py +index 7c76e09..4007e90 100644 +--- a/pcs/test/test_constraints.py ++++ b/pcs/test/test_constraints.py +@@ -2686,6 +2686,42 @@ class TicketAdd(ConstraintBaseTest): + " Master A loss-policy=fence ticket=T", + ]) + ++class TicketRemoveTest(ConstraintBaseTest): ++ def test_remove_multiple_tickets(self): ++ #fixture ++ self.assert_pcs_success('constraint ticket add T A') ++ self.assert_pcs_success( ++ 'constraint ticket add T A --force', ++ stdout_full=[ ++ "Warning: duplicate constraint already exists", ++ " A ticket=T (id:ticket-T-A)" ++ ] ++ ) ++ self.assert_pcs_success( ++ 'constraint ticket set A B setoptions ticket=T' ++ ) ++ self.assert_pcs_success( ++ 'constraint ticket set A setoptions ticket=T' ++ ) ++ self.assert_pcs_success("constraint ticket show", stdout_full=[ ++ "Ticket Constraints:", ++ " A ticket=T", ++ " A ticket=T", ++ " Resource Sets:", ++ " set A B setoptions ticket=T", ++ " set A setoptions ticket=T", ++ ]) ++ ++ #test ++ self.assert_pcs_success("constraint ticket remove T A") ++ ++ self.assert_pcs_success("constraint ticket show", stdout_full=[ ++ "Ticket Constraints:", ++ " Resource Sets:", ++ " set B setoptions ticket=T", ++ ]) ++ ++ + class TicketShow(ConstraintBaseTest): + def 
test_show_set(self): + self.assert_pcs_success('constraint ticket set A B setoptions ticket=T') +diff --git a/pcs/usage.py b/pcs/usage.py +index 9d4617f..764e3fc 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1011,6 +1011,9 @@ Commands: + Required constraint option is ticket=<ticket>. Optional constraint + options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote. + ++ ticket remove <ticket> <resource id> ++ Remove all ticket constraints with <ticket> from <resource id>. ++ + remove [constraint id]... + Remove constraint(s) or constraint rules with the specified id(s). + +-- +1.8.3.1 + diff --git a/SOURCES/bz1305049-02-pcs-does-not-support-ticket-constraints.patch b/SOURCES/bz1305049-02-pcs-does-not-support-ticket-constraints.patch new file mode 100644 index 0000000..6655172 --- /dev/null +++ b/SOURCES/bz1305049-02-pcs-does-not-support-ticket-constraints.patch @@ -0,0 +1,287 @@ +From deac91b1fc74065d01342420accfd1af88237237 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Tue, 20 Sep 2016 08:20:29 +0200 +Subject: [PATCH] squash bz1305049 pcs does not support "ticket" con + +07ae6704fff5 fail when no matching ticket constraint for remove + +d25ed3d9bc65 fix help for constraint ticket commands + +66b91ba0da7e fix manpage for booth ticket add command + +2710bc2e15c2 fix manpage for constraint ticket set +--- + pcs/cli/constraint_ticket/command.py | 4 ++- + pcs/lib/cib/constraint/ticket.py | 4 +++ + pcs/lib/cib/test/test_constraint_ticket.py | 53 +++++++++++++++++++++++++++--- + pcs/lib/commands/constraint/ticket.py | 15 +++++++-- + pcs/lib/commands/test/test_ticket.py | 20 +++++++++++ + pcs/pcs.8 | 6 ++-- + pcs/test/test_constraints.py | 8 +++++ + pcs/usage.py | 10 +++--- + 8 files changed, 105 insertions(+), 15 deletions(-) + +diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py +index 0ed4fdd..583ba9e 100644 +--- a/pcs/cli/constraint_ticket/command.py ++++ 
b/pcs/cli/constraint_ticket/command.py +@@ -8,6 +8,7 @@ from __future__ import ( + from pcs.cli.common.errors import CmdLineInputError + from pcs.cli.constraint import command + from pcs.cli.constraint_ticket import parse_args, console_report ++from pcs.cli.common.console_report import error + + def create_with_set(lib, argv, modificators): + """ +@@ -56,7 +57,8 @@ def remove(lib, argv, modificators): + if len(argv) != 2: + raise CmdLineInputError() + ticket, resource_id = argv +- lib.constraint_ticket.remove(ticket, resource_id) ++ if not lib.constraint_ticket.remove(ticket, resource_id): ++ raise error("no matching ticket constraint found") + + def show(lib, argv, modificators): + """ +diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py +index c708794..85d045c 100644 +--- a/pcs/lib/cib/constraint/ticket.py ++++ b/pcs/lib/cib/constraint/ticket.py +@@ -113,6 +113,8 @@ def remove_plain(constraint_section, ticket_key, resource_id): + for ticket_element in ticket_element_list: + ticket_element.getparent().remove(ticket_element) + ++ return len(ticket_element_list) > 0 ++ + def remove_with_resource_set(constraint_section, ticket_key, resource_id): + ref_element_list = constraint_section.xpath( + './/rsc_ticket[@ticket="{0}"]/resource_set/resource_ref[@id="{1}"]' +@@ -128,6 +130,8 @@ def remove_with_resource_set(constraint_section, ticket_key, resource_id): + if not len(ticket_element): + ticket_element.getparent().remove(ticket_element) + ++ return len(ref_element_list) > 0 ++ + def are_duplicate_plain(element, other_element): + return all( + element.attrib.get(name, "") == other_element.attrib.get(name, "") +diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py +index d3da004..b720b55 100644 +--- a/pcs/lib/cib/test/test_constraint_ticket.py ++++ b/pcs/lib/cib/test/test_constraint_ticket.py +@@ -324,11 +324,34 @@ class RemovePlainTest(TestCase): + </constraints> + """) + +- ticket.remove_plain( 
++ self.assertTrue(ticket.remove_plain( + constraint_section, + ticket_key="tA", + resource_id="rA", +- ) ++ )) ++ ++ assert_xml_equal(etree.tostring(constraint_section).decode(), """ ++ <constraints> ++ <rsc_ticket id="t2" ticket="tA" rsc="rB"/> ++ <rsc_ticket id="t4" ticket="tB" rsc="rA"/> ++ <rsc_ticket id="t5" ticket="tB" rsc="rB"/> ++ </constraints> ++ """) ++ ++ def test_remove_nothing_when_no_matching_found(self): ++ constraint_section = etree.fromstring(""" ++ <constraints> ++ <rsc_ticket id="t2" ticket="tA" rsc="rB"/> ++ <rsc_ticket id="t4" ticket="tB" rsc="rA"/> ++ <rsc_ticket id="t5" ticket="tB" rsc="rB"/> ++ </constraints> ++ """) ++ ++ self.assertFalse(ticket.remove_plain( ++ constraint_section, ++ ticket_key="tA", ++ resource_id="rA", ++ )) + + assert_xml_equal(etree.tostring(constraint_section).decode(), """ + <constraints> +@@ -369,11 +392,11 @@ class RemoveWithSetTest(TestCase): + </constraints> + """) + +- ticket.remove_with_resource_set( ++ self.assertTrue(ticket.remove_with_resource_set( + constraint_section, + ticket_key="tA", + resource_id="rA" +- ) ++ )) + + assert_xml_equal( + """ +@@ -393,3 +416,25 @@ class RemoveWithSetTest(TestCase): + """, + etree.tostring(constraint_section).decode() + ) ++ ++ def test_remove_nothing_when_no_matching_found(self): ++ constraint_section = etree.fromstring(""" ++ <constraints> ++ <rsc_ticket id="t2" ticket="tA"> ++ <resource_set id="rs3"> ++ <resource_ref id="rB"/> ++ </resource_set> ++ </rsc_ticket> ++ ++ <rsc_ticket id="t3" ticket="tB"> ++ <resource_set id="rs5"> ++ <resource_ref id="rA"/> ++ </resource_set> ++ </rsc_ticket> ++ </constraints> ++ """) ++ self.assertFalse(ticket.remove_with_resource_set( ++ constraint_section, ++ ticket_key="tA", ++ resource_id="rA" ++ )) +diff --git a/pcs/lib/commands/constraint/ticket.py b/pcs/lib/commands/constraint/ticket.py +index 2ea7afc..a14c5ad 100644 +--- a/pcs/lib/commands/constraint/ticket.py ++++ b/pcs/lib/commands/constraint/ticket.py +@@ -77,6 +77,17 @@ def 
remove(env, ticket_key, resource_id): + """ + cib = env.get_cib() + constraint_section = get_constraints(cib) +- ticket.remove_plain(constraint_section, ticket_key, resource_id) +- ticket.remove_with_resource_set(constraint_section, ticket_key, resource_id) ++ any_plain_removed = ticket.remove_plain( ++ constraint_section, ++ ticket_key, ++ resource_id ++ ) ++ any_with_resource_set_removed = ticket.remove_with_resource_set( ++ constraint_section, ++ ticket_key, ++ resource_id ++ ) ++ + env.push_cib(cib) ++ ++ return any_plain_removed or any_with_resource_set_removed +diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py +index 586ca4b..edf592a 100644 +--- a/pcs/lib/commands/test/test_ticket.py ++++ b/pcs/lib/commands/test/test_ticket.py +@@ -6,6 +6,8 @@ from __future__ import ( + ) + + from pcs.test.tools.pcs_unittest import TestCase ++from pcs.test.tools.pcs_unittest import mock ++from pcs.test.tools.misc import create_patcher + + from pcs.common import report_codes + from pcs.lib.commands.constraint import ticket as ticket_command +@@ -18,6 +20,7 @@ from pcs.test.tools.assertions import ( + from pcs.test.tools.misc import get_test_resource as rc + from pcs.test.tools.xml import get_xml_manipulation_creator_from_file + ++patch_commands = create_patcher("pcs.lib.commands.constraint.ticket") + + class CreateTest(TestCase): + def setUp(self): +@@ -65,3 +68,20 @@ class CreateTest(TestCase): + {"resource_id": "resourceA"}, + ), + ) ++ ++@patch_commands("get_constraints", mock.Mock) ++class RemoveTest(TestCase): ++ @patch_commands("ticket.remove_plain", mock.Mock(return_value=1)) ++ @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=0)) ++ def test_successfully_remove_plain(self): ++ self.assertTrue(ticket_command.remove(mock.MagicMock(), "T", "R")) ++ ++ @patch_commands("ticket.remove_plain", mock.Mock(return_value=0)) ++ @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=1)) ++ def 
test_successfully_remove_with_resource_set(self): ++ self.assertTrue(ticket_command.remove(mock.MagicMock(), "T", "R")) ++ ++ @patch_commands("ticket.remove_plain", mock.Mock(return_value=0)) ++ @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=0)) ++ def test_raises_library_error_when_no_matching_constraint_found(self): ++ self.assertFalse(ticket_command.remove(mock.MagicMock(), "T", "R")) +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 40b146f..1efe8f4 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -484,10 +484,10 @@ Remove colocation constraints with specified resources. + ticket [show] [\fB\-\-full\fR] + List all current ticket constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well). + .TP +-ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id] ++ticket add <ticket> [<role>] <resource id> [<options>] [id=<constraint\-id>] + Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped. + .TP +-ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]] ++ticket set <resource1> [<resourceN>]... [<options>] [set <resourceX> ... [<options>]] setoptions <constraint_options> + Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote. + .TP + ticket remove <ticket> <resource id> +@@ -587,7 +587,7 @@ Write new booth configuration with specified sites and arbitrators. Total numbe + destroy + Remove booth configuration files. + .TP +-ticket add <ticket> ++ticket add <ticket> [<name>=<value> ...] + Add new ticket to the current configuration. Ticket options are specified in booth manpage. 
+ + .TP +diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py +index 4007e90..fee7093 100644 +--- a/pcs/test/test_constraints.py ++++ b/pcs/test/test_constraints.py +@@ -2721,6 +2721,14 @@ class TicketRemoveTest(ConstraintBaseTest): + " set B setoptions ticket=T", + ]) + ++ def test_fail_when_no_matching_ticket_constraint_here(self): ++ self.assert_pcs_success("constraint ticket show", stdout_full=[ ++ "Ticket Constraints:", ++ ]) ++ self.assert_pcs_fail("constraint ticket remove T A", [ ++ "Error: no matching ticket constraint found" ++ ]) ++ + + class TicketShow(ConstraintBaseTest): + def test_show_set(self): +diff --git a/pcs/usage.py b/pcs/usage.py +index 764e3fc..ea407c3 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -996,15 +996,15 @@ Commands: + List all current ticket constraints (if --full is specified show + the internal constraint id's as well). + +- ticket add <ticket> [<role>] <resource id> [options] +- [id=constraint-id] ++ ticket add <ticket> [<role>] <resource id> [<options>] ++ [id=<constraint-id>] + Create a ticket constraint for <resource id>. + Available option is loss-policy=fence/stop/freeze/demote. + A role can be master, slave, started or stopped. + +- ticket set <resource1> [resourceN]... [options] +- [set <resourceX> ... [options]] +- [setoptions [constraint_options]] ++ ticket set <resource1> [<resourceN>]... [<options>] ++ [set <resourceX> ... [<options>]] ++ setoptions <constraint_options> + Create a ticket constraint with a resource set. + Available options are sequential=true/false, require-all=true/false, + action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. 
+-- +1.8.3.1 + diff --git a/SOURCES/bz1308514-01-add-booth-support.patch b/SOURCES/bz1308514-01-add-booth-support.patch new file mode 100644 index 0000000..c096bbe --- /dev/null +++ b/SOURCES/bz1308514-01-add-booth-support.patch @@ -0,0 +1,8684 @@ +From 2c8d74653e3217ba1458d65854e3a448fcedfc5d Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Tue, 28 Jun 2016 15:36:30 +0200 +Subject: [PATCH] add booth support + +--- + pcs/alert.py | 22 +- + pcs/app.py | 7 + + pcs/booth.py | 76 ++ + pcs/cli/booth/__init__.py | 0 + pcs/cli/booth/command.py | 177 ++++ + pcs/cli/booth/env.py | 121 +++ + pcs/cli/booth/test/__init__.py | 0 + pcs/cli/booth/test/test_command.py | 44 + + pcs/cli/booth/test/test_env.py | 118 +++ + pcs/cli/common/console_report.py | 13 +- + pcs/cli/common/env.py | 2 + + pcs/cli/common/lib_wrapper.py | 78 +- + pcs/cli/common/middleware.py | 9 +- + pcs/cli/common/parse_args.py | 27 + + pcs/cli/common/test/test_lib_wrapper.py | 28 +- + pcs/cli/common/test/test_middleware.py | 6 +- + pcs/cli/common/test/test_parse_args.py | 84 +- + pcs/cluster.py | 10 + + pcs/common/env_file_role_codes.py | 9 + + pcs/common/report_codes.py | 40 + + pcs/common/test/__init__.py | 0 + pcs/common/tools.py | 5 + + pcs/lib/booth/__init__.py | 0 + pcs/lib/booth/config_exchange.py | 43 + + pcs/lib/booth/config_files.py | 97 +++ + pcs/lib/booth/config_parser.py | 90 ++ + pcs/lib/booth/config_structure.py | 111 +++ + pcs/lib/booth/env.py | 149 ++++ + pcs/lib/booth/reports.py | 409 +++++++++ + pcs/lib/booth/resource.py | 116 +++ + pcs/lib/booth/status.py | 41 + + pcs/lib/booth/sync.py | 208 +++++ + pcs/lib/booth/test/__init__.py | 0 + pcs/lib/booth/test/test_config_exchange.py | 70 ++ + pcs/lib/booth/test/test_config_files.py | 272 ++++++ + pcs/lib/booth/test/test_config_parser.py | 169 ++++ + pcs/lib/booth/test/test_config_structure.py | 224 +++++ + pcs/lib/booth/test/test_env.py | 228 +++++ + pcs/lib/booth/test/test_resource.py | 203 +++++ + 
pcs/lib/booth/test/test_status.py | 137 +++ + pcs/lib/booth/test/test_sync.py | 1215 +++++++++++++++++++++++++++ + pcs/lib/cib/tools.py | 7 + + pcs/lib/commands/booth.py | 349 ++++++++ + pcs/lib/commands/test/test_booth.py | 614 ++++++++++++++ + pcs/lib/commands/test/test_ticket.py | 15 +- + pcs/lib/corosync/live.py | 3 + + pcs/lib/env.py | 44 +- + pcs/lib/env_file.py | 122 +++ + pcs/lib/errors.py | 14 + + pcs/lib/external.py | 66 +- + pcs/lib/reports.py | 215 ++++- + pcs/lib/test/misc.py | 20 + + pcs/lib/test/test_env_file.py | 187 +++++ + pcs/lib/test/test_errors.py | 20 + + pcs/pcs.8 | 52 ++ + pcs/resource.py | 19 +- + pcs/settings_default.py | 2 + + pcs/stonith.py | 3 +- + pcs/test/resources/.gitignore | 1 + + pcs/test/resources/tmp_keyfile | 1 + + pcs/test/suite.py | 16 +- + pcs/test/test_alert.py | 8 +- + pcs/test/test_booth.py | 342 ++++++++ + pcs/test/test_lib_cib_tools.py | 21 + + pcs/test/test_lib_external.py | 86 ++ + pcs/test/tools/color_text_runner.py | 9 +- + pcs/test/tools/pcs_unittest.py | 7 + + pcs/usage.py | 72 ++ + pcs/utils.py | 68 +- + pcsd/pcs.rb | 76 +- + pcsd/remote.rb | 144 ++++ + pcsd/settings.rb | 1 + + 72 files changed, 7093 insertions(+), 169 deletions(-) + create mode 100644 pcs/booth.py + create mode 100644 pcs/cli/booth/__init__.py + create mode 100644 pcs/cli/booth/command.py + create mode 100644 pcs/cli/booth/env.py + create mode 100644 pcs/cli/booth/test/__init__.py + create mode 100644 pcs/cli/booth/test/test_command.py + create mode 100644 pcs/cli/booth/test/test_env.py + create mode 100644 pcs/common/env_file_role_codes.py + create mode 100644 pcs/common/test/__init__.py + create mode 100644 pcs/lib/booth/__init__.py + create mode 100644 pcs/lib/booth/config_exchange.py + create mode 100644 pcs/lib/booth/config_files.py + create mode 100644 pcs/lib/booth/config_parser.py + create mode 100644 pcs/lib/booth/config_structure.py + create mode 100644 pcs/lib/booth/env.py + create mode 100644 pcs/lib/booth/reports.py + create mode 
100644 pcs/lib/booth/resource.py + create mode 100644 pcs/lib/booth/status.py + create mode 100644 pcs/lib/booth/sync.py + create mode 100644 pcs/lib/booth/test/__init__.py + create mode 100644 pcs/lib/booth/test/test_config_exchange.py + create mode 100644 pcs/lib/booth/test/test_config_files.py + create mode 100644 pcs/lib/booth/test/test_config_parser.py + create mode 100644 pcs/lib/booth/test/test_config_structure.py + create mode 100644 pcs/lib/booth/test/test_env.py + create mode 100644 pcs/lib/booth/test/test_resource.py + create mode 100644 pcs/lib/booth/test/test_status.py + create mode 100644 pcs/lib/booth/test/test_sync.py + create mode 100644 pcs/lib/commands/booth.py + create mode 100644 pcs/lib/commands/test/test_booth.py + create mode 100644 pcs/lib/env_file.py + create mode 100644 pcs/lib/test/misc.py + create mode 100644 pcs/lib/test/test_env_file.py + create mode 100644 pcs/lib/test/test_errors.py + create mode 100644 pcs/test/resources/tmp_keyfile + create mode 100644 pcs/test/test_booth.py + create mode 100644 pcs/test/tools/pcs_unittest.py + +diff --git a/pcs/alert.py b/pcs/alert.py +index 4786f57..693bb8d 100644 +--- a/pcs/alert.py ++++ b/pcs/alert.py +@@ -6,16 +6,18 @@ from __future__ import ( + ) + + import sys ++from functools import partial + + from pcs import ( + usage, + utils, + ) + from pcs.cli.common.errors import CmdLineInputError +-from pcs.cli.common.parse_args import prepare_options ++from pcs.cli.common.parse_args import prepare_options, group_by_keywords + from pcs.cli.common.console_report import indent + from pcs.lib.errors import LibraryError + ++parse_cmd_sections = partial(group_by_keywords, implicit_first_keyword="main") + + def alert_cmd(*args): + argv = args[1] +@@ -67,16 +69,6 @@ def recipient_cmd(*args): + ) + + +-def parse_cmd_sections(arg_list, section_list): +- output = dict([(section, []) for section in section_list + ["main"]]) +- cur_section = "main" +- for arg in arg_list: +- if arg in section_list: +- 
cur_section = arg +- continue +- output[cur_section].append(arg) +- +- return output + + + def ensure_only_allowed_options(parameter_dict, allowed_list): +@@ -91,7 +83,7 @@ def alert_add(lib, argv, modifiers): + if not argv: + raise CmdLineInputError() + +- sections = parse_cmd_sections(argv, ["options", "meta"]) ++ sections = parse_cmd_sections(argv, set(["options", "meta"])) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["id", "description", "path"]) + +@@ -110,7 +102,7 @@ def alert_update(lib, argv, modifiers): + + alert_id = argv[0] + +- sections = parse_cmd_sections(argv[1:], ["options", "meta"]) ++ sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["description", "path"]) + +@@ -137,7 +129,7 @@ def recipient_add(lib, argv, modifiers): + alert_id = argv[0] + recipient_value = argv[1] + +- sections = parse_cmd_sections(argv[2:], ["options", "meta"]) ++ sections = parse_cmd_sections(argv[2:], set(["options", "meta"])) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["description", "id"]) + +@@ -158,7 +150,7 @@ def recipient_update(lib, argv, modifiers): + + recipient_id = argv[0] + +- sections = parse_cmd_sections(argv[1:], ["options", "meta"]) ++ sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) + main_args = prepare_options(sections["main"]) + ensure_only_allowed_options(main_args, ["description", "value"]) + +diff --git a/pcs/app.py b/pcs/app.py +index 3758ee4..ab9e970 100644 +--- a/pcs/app.py ++++ b/pcs/app.py +@@ -13,6 +13,7 @@ logging.basicConfig() + + from pcs import ( + acl, ++ booth, + cluster, + config, + constraint, +@@ -97,6 +98,7 @@ def main(argv=None): + "token=", "token_coefficient=", "consensus=", "join=", + "miss_count_const=", "fail_recv_const=", + "corosync_conf=", "cluster_conf=", ++ "booth-conf=", "booth-key=", + "remote", "watchdog=", + 
#in pcs status - do not display resorce status on inactive node + "hide-inactive", +@@ -199,6 +201,11 @@ def main(argv=None): + args, + utils.get_modificators() + ), ++ "booth": lambda argv: booth.booth_cmd( ++ utils.get_library_wrapper(), ++ argv, ++ utils.get_modificators() ++ ), + } + if command not in cmd_map: + usage.main() +diff --git a/pcs/booth.py b/pcs/booth.py +new file mode 100644 +index 0000000..764dcd8 +--- /dev/null ++++ b/pcs/booth.py +@@ -0,0 +1,76 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import sys ++ ++from pcs import usage ++from pcs import utils ++from pcs.cli.booth import command ++from pcs.cli.common.errors import CmdLineInputError ++from pcs.lib.errors import LibraryError ++from pcs.resource import resource_create, resource_remove ++ ++ ++def booth_cmd(lib, argv, modifiers): ++ """ ++ routes booth command ++ """ ++ if len(argv) < 1: ++ usage.booth() ++ sys.exit(1) ++ ++ sub_cmd, argv_next = argv[0], argv[1:] ++ try: ++ if sub_cmd == "help": ++ usage.booth(argv) ++ elif sub_cmd == "config": ++ command.config_show(lib, argv_next, modifiers) ++ elif sub_cmd == "setup": ++ command.config_setup(lib, argv_next, modifiers) ++ elif sub_cmd == "destroy": ++ command.config_destroy(lib, argv_next, modifiers) ++ elif sub_cmd == "ticket": ++ if len(argv_next) < 1: ++ raise CmdLineInputError() ++ if argv_next[0] == "add": ++ command.config_ticket_add(lib, argv_next[1:], modifiers) ++ elif argv_next[0] == "remove": ++ command.config_ticket_remove(lib, argv_next[1:], modifiers) ++ elif argv_next[0] == "grant": ++ command.ticket_grant(lib, argv_next[1:], modifiers) ++ elif argv_next[0] == "revoke": ++ command.ticket_revoke(lib, argv_next[1:], modifiers) ++ else: ++ raise CmdLineInputError() ++ elif sub_cmd == "create": ++ command.get_create_in_cluster(resource_create)( ++ lib, argv_next, modifiers ++ ) ++ elif sub_cmd == "remove": ++ command.get_remove_from_cluster(resource_remove)( ++ 
lib, argv_next, modifiers ++ ) ++ elif sub_cmd == "sync": ++ command.sync(lib, argv_next, modifiers) ++ elif sub_cmd == "pull": ++ command.pull(lib, argv_next, modifiers) ++ elif sub_cmd == "enable": ++ command.enable(lib, argv_next, modifiers) ++ elif sub_cmd == "disable": ++ command.disable(lib, argv_next, modifiers) ++ elif sub_cmd == "start": ++ command.start(lib, argv_next, modifiers) ++ elif sub_cmd == "stop": ++ command.stop(lib, argv_next, modifiers) ++ elif sub_cmd == "status": ++ command.status(lib, argv_next, modifiers) ++ else: ++ raise CmdLineInputError() ++ except LibraryError as e: ++ utils.process_library_reports(e.args) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "booth", sub_cmd) +diff --git a/pcs/cli/booth/__init__.py b/pcs/cli/booth/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py +new file mode 100644 +index 0000000..bea6582 +--- /dev/null ++++ b/pcs/cli/booth/command.py +@@ -0,0 +1,177 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from pcs.cli.common.errors import CmdLineInputError ++from pcs.cli.common.parse_args import group_by_keywords ++ ++ ++DEFAULT_BOOTH_NAME = "booth" ++ ++def __get_name(modifiers): ++ return modifiers["name"] if modifiers["name"] else DEFAULT_BOOTH_NAME ++ ++def config_setup(lib, arg_list, modifiers): ++ """ ++ create booth config ++ """ ++ booth_configuration = group_by_keywords( ++ arg_list, ++ set(["sites", "arbitrators"]), ++ keyword_repeat_allowed=False ++ ) ++ if "sites" not in booth_configuration or not booth_configuration["sites"]: ++ raise CmdLineInputError() ++ ++ lib.booth.config_setup(booth_configuration, modifiers["force"]) ++ ++def config_destroy(lib, arg_list, modifiers): ++ """ ++ destroy booth config ++ """ ++ if arg_list: ++ raise CmdLineInputError() ++ lib.booth.config_destroy(ignore_config_load_problems=modifiers["force"]) ++ ++ 
++def config_show(lib, arg_list, modifiers): ++ """ ++ print booth config ++ """ ++ booth_configuration = lib.booth.config_show() ++ authfile_lines = [] ++ if booth_configuration["authfile"]: ++ authfile_lines.append( ++ "authfile = {0}".format(booth_configuration["authfile"]) ++ ) ++ ++ line_list = ( ++ ["site = {0}".format(site) for site in booth_configuration["sites"]] ++ + ++ [ ++ "arbitrator = {0}".format(arbitrator) ++ for arbitrator in booth_configuration["arbitrators"] ++ ] ++ + authfile_lines + ++ [ ++ 'ticket = "{0}"'.format(ticket) ++ for ticket in booth_configuration["tickets"] ++ ] ++ ) ++ for line in line_list: ++ print(line) ++ ++def config_ticket_add(lib, arg_list, modifiers): ++ """ ++ add ticket to current configuration ++ """ ++ if len(arg_list) != 1: ++ raise CmdLineInputError ++ lib.booth.config_ticket_add(arg_list[0]) ++ ++def config_ticket_remove(lib, arg_list, modifiers): ++ """ ++ add ticket to current configuration ++ """ ++ if len(arg_list) != 1: ++ raise CmdLineInputError ++ lib.booth.config_ticket_remove(arg_list[0]) ++ ++def ticket_operation(lib_call, arg_list, modifiers): ++ site_ip = None ++ if len(arg_list) == 2: ++ site_ip = arg_list[1] ++ elif len(arg_list) != 1: ++ raise CmdLineInputError() ++ ++ ticket = arg_list[0] ++ lib_call(__get_name(modifiers), ticket, site_ip) ++ ++def ticket_revoke(lib, arg_list, modifiers): ++ ticket_operation(lib.booth.ticket_revoke, arg_list, modifiers) ++ ++def ticket_grant(lib, arg_list, modifiers): ++ ticket_operation(lib.booth.ticket_grant, arg_list, modifiers) ++ ++def get_create_in_cluster(resource_create): ++ #TODO resource_remove is provisional hack until resources are not moved to ++ #lib ++ def create_in_cluster(lib, arg_list, modifiers): ++ if len(arg_list) != 2 or arg_list[0] != "ip": ++ raise CmdLineInputError() ++ ip = arg_list[1] ++ ++ lib.booth.create_in_cluster( ++ __get_name(modifiers), ++ ip, ++ resource_create, ++ ) ++ return create_in_cluster ++ ++def 
get_remove_from_cluster(resource_remove): ++ #TODO resource_remove is provisional hack until resources are not moved to ++ #lib ++ def remove_from_cluster(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ ++ lib.booth.remove_from_cluster(__get_name(modifiers), resource_remove) ++ ++ return remove_from_cluster ++ ++ ++def sync(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ lib.booth.config_sync( ++ DEFAULT_BOOTH_NAME, ++ skip_offline_nodes=modifiers["skip_offline_nodes"] ++ ) ++ ++ ++def enable(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ lib.booth.enable(DEFAULT_BOOTH_NAME) ++ ++ ++def disable(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ lib.booth.disable(DEFAULT_BOOTH_NAME) ++ ++ ++def start(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ lib.booth.start(DEFAULT_BOOTH_NAME) ++ ++ ++def stop(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ lib.booth.stop(DEFAULT_BOOTH_NAME) ++ ++ ++def pull(lib, arg_list, modifiers): ++ if len(arg_list) != 1: ++ raise CmdLineInputError() ++ lib.booth.pull(arg_list[0], DEFAULT_BOOTH_NAME) ++ ++ ++def status(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ booth_status = lib.booth.status(DEFAULT_BOOTH_NAME) ++ if booth_status.get("ticket"): ++ print("TICKETS:") ++ print(booth_status["ticket"]) ++ if booth_status.get("peers"): ++ print("PEERS:") ++ print(booth_status["peers"]) ++ if booth_status.get("status"): ++ print("DAEMON STATUS:") ++ print(booth_status["status"]) +diff --git a/pcs/cli/booth/env.py b/pcs/cli/booth/env.py +new file mode 100644 +index 0000000..918e487 +--- /dev/null ++++ b/pcs/cli/booth/env.py +@@ -0,0 +1,121 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os.path ++ ++from pcs.cli.common import console_report ++from pcs.common import report_codes, 
env_file_role_codes as file_role_codes ++from pcs.lib.errors import LibraryEnvError ++ ++ ++def read_env_file(path): ++ try: ++ return { ++ "content": open(path).read() if os.path.isfile(path) else None ++ } ++ except EnvironmentError as e: ++ raise console_report.error( ++ "Unable to read {0}: {1}".format(path, e.strerror) ++ ) ++ ++def write_env_file(env_file, file_path): ++ try: ++ f = open(file_path, "wb" if env_file.get("is_binary", False) else "w") ++ f.write(env_file["content"]) ++ f.close() ++ except EnvironmentError as e: ++ raise console_report.error( ++ "Unable to write {0}: {1}".format(file_path, e.strerror) ++ ) ++ ++def process_no_existing_file_expectation(file_role, env_file, file_path): ++ if( ++ env_file["no_existing_file_expected"] ++ and ++ os.path.exists(file_path) ++ ): ++ msg = "{0} {1} already exists".format(file_role, file_path) ++ if not env_file["can_overwrite_existing_file"]: ++ raise console_report.error( ++ "{0}, use --force to override".format(msg) ++ ) ++ console_report.warn(msg) ++ ++def is_missing_file_report(report, file_role_code): ++ return ( ++ report.code == report_codes.FILE_DOES_NOT_EXIST ++ and ++ report.info["file_role"] == file_role_code ++ ) ++ ++def report_missing_file(file_role, file_path): ++ console_report.error( ++ "{0} '{1}' does not exist".format(file_role, file_path) ++ ) ++ ++def middleware_config(name, config_path, key_path): ++ if config_path and not key_path: ++ raise console_report.error( ++ "With --booth-conf must be specified --booth-key as well" ++ ) ++ ++ if key_path and not config_path: ++ raise console_report.error( ++ "With --booth-key must be specified --booth-conf as well" ++ ) ++ ++ is_mocked_environment = config_path and key_path ++ ++ def create_booth_env(): ++ if not is_mocked_environment: ++ return {"name": name} ++ return { ++ "name": name, ++ "config_file": read_env_file(config_path), ++ "key_file": read_env_file(key_path), ++ "key_path": key_path, ++ } ++ ++ def flush(modified_env): ++ if not 
is_mocked_environment: ++ return ++ if not modified_env: ++ #TODO now this would not happen ++ #for more information see comment in ++ #pcs.cli.common.lib_wrapper.lib_env_to_cli_env ++ raise console_report.error("Error during library communication") ++ ++ process_no_existing_file_expectation( ++ "booth config file", ++ modified_env["config_file"], ++ config_path ++ ) ++ process_no_existing_file_expectation( ++ "booth key file", ++ modified_env["key_file"], ++ key_path ++ ) ++ write_env_file(modified_env["key_file"], key_path) ++ write_env_file(modified_env["config_file"], config_path) ++ ++ def apply(next_in_line, env, *args, **kwargs): ++ env.booth = create_booth_env() ++ try: ++ result_of_next = next_in_line(env, *args, **kwargs) ++ except LibraryEnvError as e: ++ for report in e.args: ++ if is_missing_file_report(report, file_role_codes.BOOTH_CONFIG): ++ report_missing_file("Booth config file", config_path) ++ e.sign_processed(report) ++ if is_missing_file_report(report, file_role_codes.BOOTH_KEY): ++ report_missing_file("Booth key file", key_path) ++ e.sign_processed(report) ++ raise e ++ flush(env.booth["modified_env"]) ++ return result_of_next ++ ++ return apply +diff --git a/pcs/cli/booth/test/__init__.py b/pcs/cli/booth/test/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py +new file mode 100644 +index 0000000..00216f2 +--- /dev/null ++++ b/pcs/cli/booth/test/test_command.py +@@ -0,0 +1,44 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from pcs.cli.booth import command ++from pcs.test.tools.pcs_mock import mock ++ ++ ++class ConfigSetupTest(TestCase): ++ def test_call_lib_with_correct_args(self): ++ lib = mock.MagicMock() ++ lib.booth = mock.MagicMock() ++ lib.booth.config_setup = mock.MagicMock() ++ ++ command.config_setup( ++ lib, ++ arg_list=[ ++ "sites", 
"1.1.1.1", "2.2.2.2", "4.4.4.4", ++ "arbitrators", "3.3.3.3" ++ ], ++ modifiers={ ++ "force": False, ++ } ++ ) ++ lib.booth.config_setup.assert_called_once_with( ++ { ++ "sites": ["1.1.1.1", "2.2.2.2", "4.4.4.4"], ++ "arbitrators": ["3.3.3.3"], ++ }, ++ False ++ ) ++ ++class ConfigTicketAddTest(TestCase): ++ def test_call_lib_with_ticket_name(self): ++ lib = mock.MagicMock() ++ lib.booth = mock.MagicMock() ++ lib.booth.config_ticket_add = mock.MagicMock() ++ command.config_ticket_add(lib, arg_list=["TICKET_A"], modifiers={}) ++ lib.booth.config_ticket_add.assert_called_once_with("TICKET_A") +diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py +new file mode 100644 +index 0000000..1ead6f2 +--- /dev/null ++++ b/pcs/cli/booth/test/test_env.py +@@ -0,0 +1,118 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from pcs.cli.booth.env import middleware_config ++from pcs.common import report_codes, env_file_role_codes ++from pcs.lib.errors import LibraryEnvError, ReportItem ++from pcs.test.tools.pcs_mock import mock ++ ++ ++class BoothConfTest(TestCase): ++ @mock.patch("pcs.cli.booth.env.os.path.isfile") ++ def test_sucessfully_care_about_local_file(self, mock_is_file): ++ #setup, fixtures ++ def next_in_line(env): ++ env.booth["modified_env"] = { ++ "config_file": { ++ "content": "file content", ++ "no_existing_file_expected": False, ++ }, ++ "key_file": { ++ "content": "key file content", ++ "no_existing_file_expected": False, ++ } ++ } ++ return "call result" ++ mock_is_file.return_value = True ++ mock_env = mock.MagicMock() ++ ++ mock_open = mock.mock_open() ++ with mock.patch( ++ "pcs.cli.booth.env.open", ++ mock_open, ++ create=True ++ ): ++ #run tested code ++ booth_conf_middleware = middleware_config( ++ "booth-name", ++ "/local/file/path.conf", ++ "/local/file/path.key", ++ ) ++ ++ self.assertEqual( ++ "call result", ++ 
booth_conf_middleware(next_in_line, mock_env) ++ ) ++ ++ #assertions ++ self.assertEqual(mock_is_file.mock_calls,[ ++ mock.call("/local/file/path.conf"), ++ mock.call("/local/file/path.key"), ++ ]) ++ ++ self.assertEqual(mock_env.booth["name"], "booth-name") ++ self.assertEqual(mock_env.booth["config_file"], {"content": ""}) ++ self.assertEqual(mock_env.booth["key_file"], {"content": ""}) ++ ++ self.assertEqual(mock_open.mock_calls, [ ++ mock.call(u'/local/file/path.conf'), ++ mock.call().read(), ++ mock.call(u'/local/file/path.key'), ++ mock.call().read(), ++ mock.call(u'/local/file/path.key', u'w'), ++ mock.call().write(u'key file content'), ++ mock.call().close(), ++ mock.call(u'/local/file/path.conf', u'w'), ++ mock.call().write(u'file content'), ++ mock.call().close(), ++ ]) ++ ++ @mock.patch("pcs.cli.booth.env.console_report") ++ @mock.patch("pcs.cli.booth.env.os.path.isfile") ++ def test_catch_exactly_his_exception( ++ self, mock_is_file, mock_console_report ++ ): ++ next_in_line = mock.Mock(side_effect=LibraryEnvError( ++ ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={ ++ "file_role": env_file_role_codes.BOOTH_CONFIG, ++ }), ++ ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={ ++ "file_role": env_file_role_codes.BOOTH_KEY, ++ }), ++ ReportItem.error("OTHER ERROR", "", info={}), ++ )) ++ mock_is_file.return_value = False ++ mock_env = mock.MagicMock() ++ ++ #run tested code ++ booth_conf_middleware = middleware_config( ++ "booth-name", ++ "/local/file/path.conf", ++ "/local/file/path.key", ++ ) ++ raised_exception = [] ++ def run_middleware(): ++ try: ++ booth_conf_middleware(next_in_line, mock_env) ++ except Exception as e: ++ raised_exception.append(e) ++ raise e ++ ++ self.assertRaises(LibraryEnvError, run_middleware) ++ self.assertEqual(1, len(raised_exception[0].unprocessed)) ++ self.assertEqual("OTHER ERROR", raised_exception[0].unprocessed[0].code) ++ ++ self.assertEqual(mock_console_report.error.mock_calls, [ ++ mock.call( 
++ "Booth config file '/local/file/path.conf' does not exist" ++ ), ++ mock.call( ++ "Booth key file '/local/file/path.key' does not exist" ++ ), ++ ]) +diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py +index 3d42798..e600168 100644 +--- a/pcs/cli/common/console_report.py ++++ b/pcs/cli/common/console_report.py +@@ -8,10 +8,15 @@ from __future__ import ( + import sys + + +-def error(message, exit=True): +- sys.stderr.write("Error: {0}\n".format(message)) +- if exit: +- sys.exit(1) ++def warn(message): ++ sys.stdout.write(format_message(message, "Warning: ")) ++ ++def format_message(message, prefix): ++ return "{0}{1}\n".format(prefix, message) ++ ++def error(message): ++ sys.stderr.write(format_message(message, "Error: ")) ++ return SystemExit(1) + + def indent(line_list, indent_step=2): + """ +diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py +index 2ba4f70..b1d951d 100644 +--- a/pcs/cli/common/env.py ++++ b/pcs/cli/common/env.py +@@ -6,11 +6,13 @@ from __future__ import ( + ) + + class Env(object): ++ #pylint: disable=too-many-instance-attributes + def __init__(self): + self.cib_data = None + self.cib_upgraded = False + self.user = None + self.groups = None + self.corosync_conf_data = None ++ self.booth = None + self.auth_tokens_getter = None + self.debug = False +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index c4b8342..c836575 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -5,27 +5,30 @@ from __future__ import ( + unicode_literals, + ) + +-from collections import namedtuple +-from functools import partial + import logging ++import sys ++from collections import namedtuple + + from pcs.cli.common import middleware +- +-#from pcs.lib import commands does not work: "commands" is package +-from pcs.lib.commands.constraint import colocation as constraint_colocation +-from pcs.lib.commands.constraint import order as constraint_order +-from 
pcs.lib.commands.constraint import ticket as constraint_ticket ++from pcs.cli.common.reports import ( ++ LibraryReportProcessorToConsole, ++ process_library_reports ++) + from pcs.lib.commands import ( ++ booth, + quorum, + qdevice, + sbd, + alert, + ) +-from pcs.cli.common.reports import ( +- LibraryReportProcessorToConsole as LibraryReportProcessorToConsole, ++from pcs.lib.commands.constraint import ( ++ colocation as constraint_colocation, ++ order as constraint_order, ++ ticket as constraint_ticket + ) +- + from pcs.lib.env import LibraryEnvironment ++from pcs.lib.errors import LibraryEnvError ++ + + _CACHE = {} + +@@ -40,7 +43,8 @@ def cli_env_to_lib_env(cli_env): + cli_env.groups, + cli_env.cib_data, + cli_env.corosync_conf_data, +- cli_env.auth_tokens_getter, ++ booth=cli_env.booth, ++ auth_tokens_getter=cli_env.auth_tokens_getter, + ) + + def lib_env_to_cli_env(lib_env, cli_env): +@@ -49,6 +53,19 @@ def lib_env_to_cli_env(lib_env, cli_env): + cli_env.cib_upgraded = lib_env.cib_upgraded + if not lib_env.is_corosync_conf_live: + cli_env.corosync_conf_data = lib_env.get_corosync_conf_data() ++ ++ #TODO ++ #now we know: if is in cli_env booth is in lib_env as well ++ #when we communicate with the library over the network we will need extra ++ #sanitization here ++ #this applies generally, not only for booth ++ #corosync_conf and cib suffers with this problem as well but in this cases ++ #it is dangerously hidden: when inconsistency between cli and lib ++ #environment inconsitency occurs, original content is put to file (which is ++ #wrong) ++ if cli_env.booth: ++ cli_env.booth["modified_env"] = lib_env.booth.export() ++ + return cli_env + + def bind(cli_env, run_with_middleware, run_library_command): +@@ -62,7 +79,17 @@ def bind(cli_env, run_with_middleware, run_library_command): + lib_env_to_cli_env(lib_env, cli_env) + + return lib_call_result +- return partial(run_with_middleware, run, cli_env) ++ ++ def decorated_run(*args, **kwargs): ++ try: ++ return 
run_with_middleware(run, cli_env, *args, **kwargs) ++ except LibraryEnvError as e: ++ process_library_reports(e.unprocessed) ++ #TODO we use explicit exit here - process_library_reports stil has ++ #possibility to not exit - it will need deeper rethinking ++ sys.exit(1) ++ ++ return decorated_run + + def bind_all(env, run_with_middleware, dictionary): + return wrapper(dict( +@@ -172,6 +199,33 @@ def load_module(env, middleware_factory, name): + } + ) + ++ if name == "booth": ++ return bind_all( ++ env, ++ middleware.build( ++ middleware_factory.booth_conf, ++ middleware_factory.cib ++ ), ++ { ++ "config_setup": booth.config_setup, ++ "config_destroy": booth.config_destroy, ++ "config_show": booth.config_show, ++ "config_ticket_add": booth.config_ticket_add, ++ "config_ticket_remove": booth.config_ticket_remove, ++ "create_in_cluster": booth.create_in_cluster, ++ "remove_from_cluster": booth.remove_from_cluster, ++ "config_sync": booth.config_sync, ++ "enable": booth.enable_booth, ++ "disable": booth.disable_booth, ++ "start": booth.start_booth, ++ "stop": booth.stop_booth, ++ "pull": booth.pull_config, ++ "status": booth.get_status, ++ "ticket_grant": booth.ticket_grant, ++ "ticket_revoke": booth.ticket_revoke, ++ } ++ ) ++ + raise Exception("No library part '{0}'".format(name)) + + class Library(object): +diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py +index e53e138..9254a12 100644 +--- a/pcs/cli/common/middleware.py ++++ b/pcs/cli/common/middleware.py +@@ -29,11 +29,12 @@ def cib(use_local_cib, load_cib_content, write_cib): + """ + def apply(next_in_line, env, *args, **kwargs): + if use_local_cib: +- env.cib_data = load_cib_content() ++ original_content = load_cib_content() ++ env.cib_data = original_content + + result_of_next = next_in_line(env, *args, **kwargs) + +- if use_local_cib: ++ if use_local_cib and env.cib_data != original_content: + write_cib(env.cib_data, env.cib_upgraded) + + return result_of_next +@@ -45,7 +46,7 @@ def 
corosync_conf_existing(local_file_path): + try: + env.corosync_conf_data = open(local_file_path).read() + except EnvironmentError as e: +- console_report.error("Unable to read {0}: {1}".format( ++ raise console_report.error("Unable to read {0}: {1}".format( + local_file_path, + e.strerror + )) +@@ -58,7 +59,7 @@ def corosync_conf_existing(local_file_path): + f.write(env.corosync_conf_data) + f.close() + except EnvironmentError as e: +- console_report.error("Unable to write {0}: {1}".format( ++ raise console_report.error("Unable to write {0}: {1}".format( + local_file_path, + e.strerror + )) +diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py +index 3b01775..d17c5da 100644 +--- a/pcs/cli/common/parse_args.py ++++ b/pcs/cli/common/parse_args.py +@@ -25,3 +25,30 @@ def prepare_options(cmdline_args): + name, value = arg.split("=", 1) + options[name] = value + return options ++ ++def group_by_keywords( ++ arg_list, keyword_set, ++ implicit_first_keyword=None, keyword_repeat_allowed=True, ++): ++ groups = dict([(keyword, []) for keyword in keyword_set]) ++ if implicit_first_keyword: ++ groups[implicit_first_keyword] = [] ++ ++ if not arg_list: ++ return groups ++ ++ used_keywords = [] ++ if implicit_first_keyword: ++ used_keywords.append(implicit_first_keyword) ++ elif arg_list[0] not in keyword_set: ++ raise CmdLineInputError() ++ ++ for arg in arg_list: ++ if arg in list(groups.keys()): ++ if arg in used_keywords and not keyword_repeat_allowed: ++ raise CmdLineInputError() ++ used_keywords.append(arg) ++ else: ++ groups[used_keywords[-1]].append(arg) ++ ++ return groups +diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py +index f34d2d0..c10bb62 100644 +--- a/pcs/cli/common/test/test_lib_wrapper.py ++++ b/pcs/cli/common/test/test_lib_wrapper.py +@@ -6,8 +6,10 @@ from __future__ import ( + ) + from unittest import TestCase + +-from pcs.cli.common.lib_wrapper import Library ++from pcs.cli.common.lib_wrapper 
import Library, bind + from pcs.test.tools.pcs_mock import mock ++from pcs.lib.errors import ReportItem ++from pcs.lib.errors import LibraryEnvError + + class LibraryWrapperTest(TestCase): + def test_raises_for_bad_path(self): +@@ -30,6 +32,28 @@ class LibraryWrapperTest(TestCase): + mock_middleware_factory = mock.MagicMock() + mock_middleware_factory.cib = dummy_middleware + mock_middleware_factory.corosync_conf_existing = dummy_middleware +- Library('env', mock_middleware_factory).constraint_order.set('first', second="third") ++ mock_env = mock.MagicMock() ++ Library(mock_env, mock_middleware_factory).constraint_order.set( ++ 'first', second="third" ++ ) + + mock_order_set.assert_called_once_with(lib_env, "first", second="third") ++ ++class BindTest(TestCase): ++ @mock.patch("pcs.cli.common.lib_wrapper.process_library_reports") ++ def test_report_unprocessed_library_env_errors(self, mock_process_report): ++ report1 = ReportItem.error("OTHER ERROR", "", info={}) ++ report2 = ReportItem.error("OTHER ERROR", "", info={}) ++ report3 = ReportItem.error("OTHER ERROR", "", info={}) ++ e = LibraryEnvError(report1, report2, report3) ++ e.sign_processed(report2) ++ mock_middleware = mock.Mock(side_effect=e) ++ ++ binded = bind( ++ cli_env=None, ++ run_with_middleware=mock_middleware, ++ run_library_command=None ++ ) ++ ++ self.assertRaises(SystemExit, lambda: binded(cli_env=None)) ++ mock_process_report.assert_called_once_with([report1, report3]) +diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py +index 6179882..c030cd9 100644 +--- a/pcs/cli/common/test/test_middleware.py ++++ b/pcs/cli/common/test/test_middleware.py +@@ -6,7 +6,8 @@ from __future__ import ( + ) + + from unittest import TestCase +-import pcs.cli.common.middleware ++ ++from pcs.cli.common import middleware + + + class MiddlewareBuildTest(TestCase): +@@ -29,7 +30,7 @@ class MiddlewareBuildTest(TestCase): + next(lib, argv, modificators) + log.append('m2 done') + +- 
run_with_middleware = pcs.cli.common.middleware.build(m1, m2) ++ run_with_middleware = middleware.build(m1, m2) + run_with_middleware(command, "1", "2", "3") + self.assertEqual(log, [ + 'm1 start: 1, 2, 3', +@@ -38,3 +39,4 @@ class MiddlewareBuildTest(TestCase): + 'm2 done', + 'm1 done', + ]) ++ +diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py +index 1d6c4b0..eb358a5 100644 +--- a/pcs/cli/common/test/test_parse_args.py ++++ b/pcs/cli/common/test/test_parse_args.py +@@ -6,7 +6,11 @@ from __future__ import ( + ) + + from unittest import TestCase +-from pcs.cli.common.parse_args import split_list, prepare_options ++from pcs.cli.common.parse_args import( ++ split_list, ++ prepare_options, ++ group_by_keywords, ++) + from pcs.cli.common.errors import CmdLineInputError + + +@@ -42,3 +46,81 @@ class SplitListTest(TestCase): + [[], ['a', 'b'], ['c', 'd'], []], + split_list(['|','a', 'b', '|', 'c', 'd', "|"], '|') + ) ++ ++class SplitByKeywords(TestCase): ++ def test_split_with_implicit_first_keyword(self): ++ self.assertEqual( ++ group_by_keywords( ++ [0, "first", 1, 2, "second", 3], ++ set(["first", "second"]), ++ implicit_first_keyword="zero" ++ ), ++ { ++ "zero": [0], ++ "first": [1, 2], ++ "second": [3], ++ } ++ ) ++ ++ def test_splict_without_implict_keyword(self): ++ self.assertEqual( ++ group_by_keywords( ++ ["first", 1, 2, "second", 3], ++ set(["first", "second"]), ++ ), ++ { ++ "first": [1, 2], ++ "second": [3], ++ } ++ ) ++ ++ def test_raises_when_args_do_not_start_with_keyword_nor_implicit(self): ++ self.assertRaises(CmdLineInputError, lambda: group_by_keywords( ++ [0, "first", 1, 2, "second", 3], ++ set(["first", "second"]), ++ )) ++ ++ def test_returns_dict_with_empty_lists_for_no_args(self): ++ self.assertEqual( ++ group_by_keywords( ++ [], ++ set(["first", "second"]) ++ ), ++ { ++ "first": [], ++ "second": [], ++ } ++ ) ++ ++ def test_returns_dict_with_empty_lists_for_no_args_implicit_case(self): ++ 
self.assertEqual( ++ group_by_keywords( ++ [], ++ set(["first", "second"]), ++ implicit_first_keyword="zero", ++ ), ++ { ++ "zero": [], ++ "first": [], ++ "second": [], ++ } ++ ) ++ ++ def test_allow_keywords_repeating(self): ++ self.assertEqual( ++ group_by_keywords( ++ ["first", 1, 2, "second", 3, "first", 4], ++ set(["first", "second"]), ++ ), ++ { ++ "first": [1, 2, 4], ++ "second": [3], ++ } ++ ) ++ ++ def test_can_disallow_keywords_repeating(self): ++ self.assertRaises(CmdLineInputError, lambda: group_by_keywords( ++ ["first", 1, 2, "second", 3, "first"], ++ set(["first", "second"]), ++ keyword_repeat_allowed=False, ++ )) +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 3f41d96..90fec63 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -42,6 +42,7 @@ from pcs.lib import ( + sbd as lib_sbd, + reports as lib_reports, + ) ++from pcs.lib.booth import sync as booth_sync + from pcs.lib.commands.quorum import _add_device_model_net + from pcs.lib.corosync import ( + config_parser as corosync_conf_utils, +@@ -1388,6 +1389,7 @@ def cluster_node(argv): + report_processor = lib_env.report_processor + node_communicator = lib_env.node_communicator() + node_addr = NodeAddresses(node0, node1) ++ modifiers = utils.get_modificators() + try: + if lib_sbd.is_sbd_enabled(utils.cmd_runner()): + if "--watchdog" not in utils.pcs_options: +@@ -1421,6 +1423,14 @@ def cluster_node(argv): + lib_sbd.disable_sbd_service_on_node( + report_processor, node_communicator, node_addr + ) ++ ++ booth_sync.send_all_config_to_node( ++ node_communicator, ++ report_processor, ++ node_addr, ++ rewrite_existing=modifiers["force"], ++ skip_wrong_config=modifiers["force"] ++ ) + except LibraryError as e: + process_library_reports(e.args) + except NodeCommunicationException as e: +diff --git a/pcs/common/env_file_role_codes.py b/pcs/common/env_file_role_codes.py +new file mode 100644 +index 0000000..1f47387 +--- /dev/null ++++ b/pcs/common/env_file_role_codes.py +@@ -0,0 +1,9 @@ ++from __future__ 
import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++BOOTH_CONFIG = "BOOTH_CONFIG" ++BOOTH_KEY = "BOOTH_KEY" +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index 53f2ccb..e71d418 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -8,6 +8,9 @@ from __future__ import ( + # force cathegories + FORCE_ACTIVE_RRP = "ACTIVE_RRP" + FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE" ++FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB" ++FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY" ++FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE" + FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE" + FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE" + FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD" +@@ -17,10 +20,40 @@ FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT" + FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT" + FORCE_METADATA_ISSUE = "METADATA_ISSUE" + SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES" ++SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG" + + AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR" + AGENT_NOT_FOUND = "AGENT_NOT_FOUND" + BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT' ++BOOTH_ADDRESS_DUPLICATION = "BOOTH_ADDRESS_DUPLICATION" ++BOOTH_ALREADY_IN_CIB = "BOOTH_ALREADY_IN_CIB" ++BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP = "BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP" ++BOOTH_CANNOT_IDENTIFY_KEYFILE = "BOOTH_CANNOT_IDENTIFY_KEYFILE" ++BOOTH_CONFIG_FILE_ALREADY_EXISTS = "BOOTH_CONFIG_FILE_ALREADY_EXISTS" ++BOOTH_CONFIG_IO_ERROR = "BOOTH_CONFIG_IO_ERROR" ++BOOTH_CONFIG_IS_USED = "BOOTH_CONFIG_IS_USED" ++BOOTH_CONFIG_READ_ERROR = "BOOTH_CONFIG_READ_ERROR" ++BOOTH_CONFIG_WRITE_ERROR = "BOOTH_CONFIG_WRITE_ERROR" ++BOOTH_CONFIG_UNEXPECTED_LINES = "BOOTH_CONFIG_UNEXPECTED_LINES" ++BOOTH_CONFIGS_SAVED_ON_NODE = "BOOTH_CONFIGS_SAVED_ON_NODE" ++BOOTH_CONFIGS_SAVING_ON_NODE = "BOOTH_CONFIGS_SAVING_ON_NODE" ++BOOTH_DAEMON_STATUS_ERROR = "BOOTH_DAEMON_STATUS_ERROR" 
++BOOTH_DISTRIBUTING_CONFIG = "BOOTH_DISTRIBUTING_CONFIG" ++BOOTH_EVEN_PEERS_NUM = "BOOTH_EVEN_PEERS_NUM" ++BOOTH_FETCHING_CONFIG_FROM_NODE = "BOOTH_FETCHING_CONFIG_FROM_NODE" ++BOOTH_INVALID_CONFIG_NAME = "BOOTH_INVALID_CONFIG_NAME" ++BOOTH_INVALID_NAME = "BOOTH_INVALID_NAME" ++BOOTH_LACK_OF_SITES = "BOOTH_LACK_OF_SITES" ++BOOTH_MULTIPLE_TIMES_IN_CIB = "BOOTH_MULTIPLE_TIMES_IN_CIB" ++BOOTH_NOT_EXISTS_IN_CIB = "BOOTH_NOT_EXISTS_IN_CIB" ++BOOTH_PEERS_STATUS_ERROR = "BOOTH_PEERS_STATUS_ERROR" ++BOOTH_SKIPPING_CONFIG = "BOOTH_SKIPPING_CONFIG" ++BOOTH_TICKET_DOES_NOT_EXIST = "BOOTH_TICKET_DOES_NOT_EXIST" ++BOOTH_TICKET_DUPLICATE = "BOOTH_TICKET_DUPLICATE" ++BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED" ++BOOTH_TICKET_NAME_INVALID = "BOOTH_TICKET_NAME_INVALID" ++BOOTH_TICKET_STATUS_ERROR = "BOOTH_TICKET_STATUS_ERROR" ++BOOTH_UNSUPORTED_FILE_LOCATION = "BOOTH_UNSUPORTED_FILE_LOCATION" + CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND" + CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS" + CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID" +@@ -38,6 +71,7 @@ CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED' + CMAN_UNSUPPORTED_COMMAND = "CMAN_UNSUPPORTED_COMMAND" + COMMON_ERROR = 'COMMON_ERROR' + COMMON_INFO = 'COMMON_INFO' ++LIVE_ENVIRONMENT_REQUIRED = "LIVE_ENVIRONMENT_REQUIRED" + COROSYNC_CONFIG_ACCEPTED_BY_NODE = "COROSYNC_CONFIG_ACCEPTED_BY_NODE" + COROSYNC_CONFIG_DISTRIBUTION_STARTED = "COROSYNC_CONFIG_DISTRIBUTION_STARTED" + COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR = "COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR" +@@ -53,6 +87,9 @@ COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE" + CRM_MON_ERROR = "CRM_MON_ERROR" + DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" + EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST" ++FILE_ALREADY_EXISTS = "FILE_ALREADY_EXISTS" ++FILE_DOES_NOT_EXIST = "FILE_DOES_NOT_EXIST" ++FILE_IO_ERROR = "FILE_IO_ERROR" + ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS' + 
ID_NOT_FOUND = 'ID_NOT_FOUND' + IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION' +@@ -134,10 +171,13 @@ SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS" + SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR" + SERVICE_STOP_STARTED = "SERVICE_STOP_STARTED" + SERVICE_STOP_SUCCESS = "SERVICE_STOP_SUCCESS" ++UNABLE_TO_DETERMINE_USER_UID = "UNABLE_TO_DETERMINE_USER_UID" ++UNABLE_TO_DETERMINE_GROUP_GID = "UNABLE_TO_DETERMINE_GROUP_GID" + UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA' + UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG" + UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG" + UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS" + UNKNOWN_COMMAND = 'UNKNOWN_COMMAND' + UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT' ++UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS" + WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND" +diff --git a/pcs/common/test/__init__.py b/pcs/common/test/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/pcs/common/tools.py b/pcs/common/tools.py +index f4f6c4b..275f6b9 100644 +--- a/pcs/common/tools.py ++++ b/pcs/common/tools.py +@@ -33,3 +33,8 @@ def run_parallel(worker, data_list): + + for thread in thread_list: + thread.join() ++ ++def format_environment_error(e): ++ if e.filename: ++ return "{0}: '{1}'".format(e.strerror, e.filename) ++ return e.strerror +diff --git a/pcs/lib/booth/__init__.py b/pcs/lib/booth/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/pcs/lib/booth/config_exchange.py b/pcs/lib/booth/config_exchange.py +new file mode 100644 +index 0000000..e0569ba +--- /dev/null ++++ b/pcs/lib/booth/config_exchange.py +@@ -0,0 +1,43 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++from pcs.lib.booth.config_structure import ConfigItem ++ ++EXCHANGE_PRIMITIVES = ["authfile"] ++EXCHANGE_LISTS = [ ++ ("site", "sites"), ++ ("arbitrator", "arbitrators"), ++ 
("ticket", "tickets"), ++] ++ ++ ++def to_exchange_format(booth_configuration): ++ exchange_lists = dict(EXCHANGE_LISTS) ++ exchange = dict( ++ (exchange_key, []) for exchange_key in exchange_lists.values() ++ ) ++ ++ for key, value, _ in booth_configuration: ++ if key in exchange_lists: ++ exchange[exchange_lists[key]].append(value) ++ if key in EXCHANGE_PRIMITIVES: ++ exchange[key] = value ++ ++ return exchange ++ ++ ++def from_exchange_format(exchange_format): ++ booth_config = [] ++ for key in EXCHANGE_PRIMITIVES: ++ if key in exchange_format: ++ booth_config.append(ConfigItem(key, exchange_format[key])) ++ ++ for key, exchange_key in EXCHANGE_LISTS: ++ booth_config.extend([ ++ ConfigItem(key, value) ++ for value in exchange_format.get(exchange_key, []) ++ ]) ++ return booth_config +diff --git a/pcs/lib/booth/config_files.py b/pcs/lib/booth/config_files.py +new file mode 100644 +index 0000000..aaad951 +--- /dev/null ++++ b/pcs/lib/booth/config_files.py +@@ -0,0 +1,97 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os ++import binascii ++ ++from pcs.common import report_codes, env_file_role_codes as file_roles ++from pcs.common.tools import format_environment_error ++from pcs.lib import reports as lib_reports ++from pcs.lib.booth import reports ++from pcs.lib.errors import ReportItemSeverity ++from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR ++ ++ ++def generate_key(): ++ return binascii.hexlify(os.urandom(32)) ++ ++def get_all_configs_file_names(): ++ """ ++ Returns list of all file names ending with '.conf' in booth configuration ++ directory. ++ """ ++ return [ ++ file_name for file_name in os.listdir(BOOTH_CONFIG_DIR) ++ if os.path.isfile(file_name) and file_name.endswith(".conf") and ++ len(file_name) > len(".conf") ++ ] ++ ++ ++def _read_config(file_name): ++ """ ++ Read specified booth config from default booth config directory. 
++ ++ file_name -- string, name of file ++ """ ++ with open(os.path.join(BOOTH_CONFIG_DIR, file_name), "r") as file: ++ return file.read() ++ ++ ++def read_configs(reporter, skip_wrong_config=False): ++ """ ++ Returns content of all configs present on local system in dictionary, ++ where key is name of config and value is its content. ++ ++ reporter -- report processor ++ skip_wrong_config -- if True skip local configs that are unreadable ++ """ ++ report_list = [] ++ output = {} ++ for file_name in get_all_configs_file_names(): ++ try: ++ output[file_name] = _read_config(file_name) ++ except EnvironmentError: ++ report_list.append(reports.booth_config_unable_to_read( ++ file_name, ++ ( ++ ReportItemSeverity.WARNING if skip_wrong_config ++ else ReportItemSeverity.ERROR ++ ), ++ ( ++ None if skip_wrong_config ++ else report_codes.SKIP_UNREADABLE_CONFIG ++ ) ++ )) ++ reporter.process_list(report_list) ++ return output ++ ++ ++def read_authfile(reporter, path): ++ """ ++ Returns content of specified authfile as bytes. None if file is not in ++ default booth directory or there was some IO error. 
++ ++ reporter -- report processor ++ path -- path to the authfile to be read ++ """ ++ if not path: ++ return None ++ if os.path.dirname(os.path.abspath(path)) != BOOTH_CONFIG_DIR: ++ reporter.process(reports.booth_unsupported_file_location(path)) ++ return None ++ try: ++ with open(path, "rb") as file: ++ return file.read() ++ except EnvironmentError as e: ++ reporter.process(lib_reports.file_io_error( ++ file_roles.BOOTH_KEY, ++ path, ++ reason=format_environment_error(e), ++ operation="read", ++ severity=ReportItemSeverity.WARNING ++ )) ++ return None +diff --git a/pcs/lib/booth/config_parser.py b/pcs/lib/booth/config_parser.py +new file mode 100644 +index 0000000..62d2203 +--- /dev/null ++++ b/pcs/lib/booth/config_parser.py +@@ -0,0 +1,90 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import re ++ ++from pcs.lib.booth import config_structure, reports ++from pcs.lib.errors import LibraryError ++ ++ ++class InvalidLines(Exception): ++ pass ++ ++def parse(content): ++ try: ++ return organize_lines(parse_to_raw_lines(content)) ++ except InvalidLines as e: ++ raise LibraryError( ++ reports.booth_config_unexpected_lines(e.args[0]) ++ ) ++ ++def build(config_line_list): ++ return "\n".join(build_to_lines(config_line_list)) ++ ++def build_to_lines(config_line_list, deep=0): ++ line_list = [] ++ for key, value, details in config_line_list: ++ line_value = value if key != "ticket" else '"{0}"'.format(value) ++ line_list.append("{0}{1} = {2}".format(" "*deep, key, line_value)) ++ if details: ++ line_list.extend(build_to_lines(details, deep+1)) ++ return line_list ++ ++ ++def organize_lines(raw_line_list): ++ #Decision: Global key is moved up when is below ticket. Alternative is move ++ #it below all ticket details. But it is confusing. 
++ global_section = [] ++ ticket_section = [] ++ current_ticket = None ++ for key, value in raw_line_list: ++ if key == "ticket": ++ current_ticket = config_structure.ConfigItem(key, value) ++ ticket_section.append(current_ticket) ++ elif key in config_structure.GLOBAL_KEYS or not current_ticket: ++ global_section.append(config_structure.ConfigItem(key, value)) ++ else: ++ current_ticket.details.append( ++ config_structure.ConfigItem(key, value) ++ ) ++ ++ return global_section + ticket_section ++ ++def search_with_multiple_re(re_object_list, string): ++ """ ++ return MatchObject of first matching regular expression object or None ++ list re_object_list contains regular expresssion objects (products of ++ re.compile) ++ """ ++ for expression in re_object_list: ++ match = expression.search(string) ++ if match: ++ return match ++ return None ++ ++def parse_to_raw_lines(config_content): ++ keyword_part = r"^(?P<key>[a-zA-Z0-9_-]+)\s*=\s*" ++ expression_list = [re.compile(pattern.format(keyword_part)) for pattern in [ ++ r"""{0}(?P<value>[^'"]+)$""", ++ r"""{0}'(?P<value>[^']*)'\s*(#.*)?$""", ++ r"""{0}"(?P<value>[^"]*)"\s*(#.*)?$""", ++ ]] ++ ++ line_list = [] ++ invalid_line_list = [] ++ for line in config_content.splitlines(): ++ line = line.strip() ++ match = search_with_multiple_re(expression_list, line) ++ if match: ++ line_list.append((match.group("key"), match.group("value"))) ++ elif line and not line.startswith("#"): ++ invalid_line_list.append(line) ++ ++ if invalid_line_list: ++ raise InvalidLines(invalid_line_list) ++ ++ return line_list +diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py +new file mode 100644 +index 0000000..c92f718 +--- /dev/null ++++ b/pcs/lib/booth/config_structure.py +@@ -0,0 +1,111 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import re ++ ++from pcs.lib.booth import reports ++from pcs.lib.errors import LibraryError ++from collections 
import namedtuple ++ ++GLOBAL_KEYS = ( ++ "transport", ++ "port", ++ "name", ++ "authfile", ++ "maxtimeskew", ++ "site", ++ "arbitrator", ++ "site-user", ++ "site-group", ++ "arbitrator-user", ++ "arbitrator-group", ++ "debug", ++ "ticket", ++) ++TICKET_KEYS = ( ++ "acquire-after", ++ "attr-prereq", ++ "before-acquire-handler", ++ "expire", ++ "renewal-freq", ++ "retries", ++ "timeout", ++ "weights", ++) ++ ++class ConfigItem(namedtuple("ConfigItem", "key value details")): ++ def __new__(cls, key, value, details=None): ++ details = details if details else [] ++ return super(ConfigItem, cls).__new__(cls, key, value, details) ++ ++def validate_peers(site_list, arbitrator_list): ++ report = [] ++ ++ if len(site_list) < 2: ++ report.append(reports.booth_lack_of_sites(site_list)) ++ ++ peer_list = site_list + arbitrator_list ++ ++ if len(peer_list) % 2 == 0: ++ report.append(reports.booth_even_peers_num(len(peer_list))) ++ ++ address_set = set() ++ duplicate_addresses = set() ++ for address in peer_list: ++ if address in address_set: ++ duplicate_addresses.add(address) ++ else: ++ address_set.add(address) ++ if duplicate_addresses: ++ report.append(reports.booth_address_duplication(duplicate_addresses)) ++ ++ if report: ++ raise LibraryError(*report) ++ ++def remove_ticket(booth_configuration, ticket_name): ++ validate_ticket_exists(booth_configuration, ticket_name) ++ return [ ++ config_item for config_item in booth_configuration ++ if config_item.key != "ticket" or config_item.value != ticket_name ++ ] ++ ++def add_ticket(booth_configuration, ticket_name): ++ validate_ticket_name(ticket_name) ++ validate_ticket_unique(booth_configuration, ticket_name) ++ return booth_configuration + [ ++ ConfigItem("ticket", ticket_name) ++ ] ++ ++def validate_ticket_exists(booth_configuration, ticket_name): ++ if not ticket_exists(booth_configuration, ticket_name): ++ raise LibraryError(reports.booth_ticket_does_not_exist(ticket_name)) ++ ++def 
validate_ticket_unique(booth_configuration, ticket_name): ++ if ticket_exists(booth_configuration, ticket_name): ++ raise LibraryError(reports.booth_ticket_duplicate(ticket_name)) ++ ++def ticket_exists(booth_configuration, ticket_name): ++ return any( ++ value for key, value, _ in booth_configuration ++ if key == "ticket" and value == ticket_name ++ ) ++ ++def validate_ticket_name(ticket_name): ++ if not re.compile(r"^[\w-]+$").search(ticket_name): ++ raise LibraryError(reports.booth_ticket_name_invalid(ticket_name)) ++ ++def set_authfile(booth_configuration, auth_file): ++ return [ConfigItem("authfile", auth_file)] + [ ++ config_item for config_item in booth_configuration ++ if config_item.key != "authfile" ++ ] ++ ++def get_authfile(booth_configuration): ++ for key, value, _ in reversed(booth_configuration): ++ if key == "authfile": ++ return value ++ return None +diff --git a/pcs/lib/booth/env.py b/pcs/lib/booth/env.py +new file mode 100644 +index 0000000..57d47aa +--- /dev/null ++++ b/pcs/lib/booth/env.py +@@ -0,0 +1,149 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os ++import pwd ++import grp ++ ++from pcs import settings ++from pcs.common import env_file_role_codes ++from pcs.common.tools import format_environment_error ++from pcs.lib import reports as common_reports ++from pcs.lib.booth import reports ++from pcs.lib.env_file import GhostFile, RealFile ++from pcs.lib.errors import LibraryError ++from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR ++ ++ ++def get_booth_env_file_name(name, extension): ++ report_list = [] ++ if "/" in name: ++ report_list.append( ++ reports.booth_invalid_name(name, "contains illegal character '/'") ++ ) ++ if report_list: ++ raise LibraryError(*report_list) ++ return "{0}.{1}".format(os.path.join(BOOTH_CONFIG_DIR, name), extension) ++ ++def get_config_file_name(name): ++ return get_booth_env_file_name(name, "conf") ++ ++def get_key_path(name): 
++ return get_booth_env_file_name(name, "key") ++ ++def report_keyfile_io_error(file_path, operation, e): ++ return LibraryError(common_reports.file_io_error( ++ file_role=env_file_role_codes.BOOTH_KEY, ++ file_path=file_path, ++ operation=operation, ++ reason=format_environment_error(e) ++ )) ++ ++def set_keyfile_access(file_path): ++ #shutil.chown is not in python2 ++ try: ++ uid = pwd.getpwnam(settings.pacemaker_uname).pw_uid ++ except KeyError: ++ raise LibraryError(common_reports.unable_to_determine_user_uid( ++ settings.pacemaker_uname ++ )) ++ try: ++ gid = grp.getgrnam(settings.pacemaker_gname).gr_gid ++ except KeyError: ++ raise LibraryError(common_reports.unable_to_determine_group_gid( ++ settings.pacemaker_gname ++ )) ++ try: ++ os.chown(file_path, uid, gid) ++ except EnvironmentError as e: ++ raise report_keyfile_io_error(file_path, "chown", e) ++ try: ++ os.chmod(file_path, 0o600) ++ except EnvironmentError as e: ++ raise report_keyfile_io_error(file_path, "chmod", e) ++ ++class BoothEnv(object): ++ def __init__(self, report_processor, env_data): ++ self.__report_processor = report_processor ++ self.__name = env_data["name"] ++ if "config_file" in env_data: ++ self.__config = GhostFile( ++ file_role=env_file_role_codes.BOOTH_CONFIG, ++ content=env_data["config_file"]["content"] ++ ) ++ self.__key_path = env_data["key_path"] ++ self.__key = GhostFile( ++ file_role=env_file_role_codes.BOOTH_KEY, ++ content=env_data["key_file"]["content"] ++ ) ++ else: ++ self.__config = RealFile( ++ file_role=env_file_role_codes.BOOTH_CONFIG, ++ file_path=get_config_file_name(env_data["name"]), ++ ) ++ self.__set_key_path(get_key_path(env_data["name"])) ++ ++ def __set_key_path(self, path): ++ self.__key_path = path ++ self.__key = RealFile( ++ file_role=env_file_role_codes.BOOTH_KEY, ++ file_path=path, ++ ) ++ ++ def command_expect_live_env(self): ++ if not self.__config.is_live: ++ raise LibraryError(common_reports.live_environment_required([ ++ "--booth-conf", ++ 
"--booth-key", ++ ])) ++ ++ def set_key_path(self, path): ++ if not self.__config.is_live: ++ raise AssertionError( ++ "Set path of keyfile is supported only in live environment" ++ ) ++ self.__set_key_path(path) ++ ++ @property ++ def name(self): ++ return self.__name ++ ++ @property ++ def key_path(self): ++ return self.__key_path ++ ++ def get_config_content(self): ++ return self.__config.read() ++ ++ def create_config(self, content, can_overwrite_existing=False): ++ self.__config.assert_no_conflict_with_existing( ++ self.__report_processor, ++ can_overwrite_existing ++ ) ++ self.__config.write(content) ++ ++ def create_key(self, key_content, can_overwrite_existing=False): ++ self.__key.assert_no_conflict_with_existing( ++ self.__report_processor, ++ can_overwrite_existing ++ ) ++ self.__key.write(key_content, set_keyfile_access, is_binary=True) ++ ++ def push_config(self, content): ++ self.__config.write(content) ++ ++ def remove_key(self): ++ self.__key.remove(silence_no_existence=True) ++ ++ def remove_config(self): ++ self.__config.remove() ++ ++ def export(self): ++ return {} if self.__config.is_live else { ++ "config_file": self.__config.export(), ++ "key_file": self.__key.export(), ++ } +diff --git a/pcs/lib/booth/reports.py b/pcs/lib/booth/reports.py +new file mode 100644 +index 0000000..8a804e0 +--- /dev/null ++++ b/pcs/lib/booth/reports.py +@@ -0,0 +1,409 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from pcs.common import report_codes ++from pcs.lib.errors import ReportItem, ReportItemSeverity ++ ++ ++def booth_lack_of_sites(site_list): ++ """ ++ Less than 2 booth sites entered. But it does not make sense. 
++ list site_list contains currently entered sites ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_LACK_OF_SITES, ++ "lack of sites for booth configuration (need 2 at least):" ++ " sites {sites_string}" ++ , ++ info={ ++ "sites": site_list, ++ "sites_string": ", ".join(site_list) if site_list else "missing", ++ } ++ ) ++ ++def booth_even_peers_num(number): ++ """ ++ Booth requires odd number of peers. But even number of peers was entered. ++ integer number determines how many peers was entered ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_EVEN_PEERS_NUM, ++ "odd number of peers is required (entered {number} peers)", ++ info={ ++ "number": number, ++ } ++ ) ++ ++def booth_address_duplication(duplicate_addresses): ++ """ ++ Address of each peer must unique. But address duplication appeared. ++ set duplicate_addresses contains addreses entered multiple times ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_ADDRESS_DUPLICATION, ++ "duplicate address for booth configuration: {addresses_string}" ++ , ++ info={ ++ "addresses": duplicate_addresses, ++ "addresses_string": ", ".join(duplicate_addresses), ++ } ++ ) ++ ++def booth_config_unexpected_lines(line_list): ++ """ ++ Booth config have defined structure. But line out of structure definition ++ appeared. ++ list line_list contains lines out of defined structure ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_CONFIG_UNEXPECTED_LINES, ++ "unexpected line appeard in config: \n{lines_string}", ++ info={ ++ "line_list": line_list, ++ "lines_string": "\n".join(line_list) ++ } ++ ) ++ ++def booth_invalid_name(name, reason): ++ """ ++ Booth instance name have rules. For example it cannot contain illegal ++ characters like '/'. But some of rules was violated. 
++ string name is entered booth instance name ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_INVALID_NAME, ++ "booth name '{name}' is not valid ({reason})" ++ , ++ info={ ++ "name": name, ++ "reason": reason, ++ } ++ ) ++ ++def booth_ticket_name_invalid(ticket_name): ++ """ ++ Name of booth ticket may consists of alphanumeric characters or dash. ++ Entered ticket name violating this rule. ++ string ticket_name is entered booth ticket name ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_TICKET_NAME_INVALID, ++ "booth ticket name '{ticket_name}' is not valid," ++ " use alphanumeric chars or dash" ++ , ++ info={ ++ "ticket_name": ticket_name, ++ } ++ ) ++ ++def booth_ticket_duplicate(ticket_name): ++ """ ++ Each booth ticket name must be uniqe. But duplicate booth ticket name ++ was entered. ++ string ticket_name is entered booth ticket name ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_TICKET_DUPLICATE, ++ "booth ticket name '{ticket_name}' already exists in configuration", ++ info={ ++ "ticket_name": ticket_name, ++ } ++ ) ++ ++def booth_ticket_does_not_exist(ticket_name): ++ """ ++ Some operations (like ticket remove) expect the ticket name in booth ++ configuration. But the ticket name not found in booth configuration. ++ string ticket_name is entered booth ticket name ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_TICKET_DOES_NOT_EXIST, ++ "booth ticket name '{ticket_name}' does not exist", ++ info={ ++ "ticket_name": ticket_name, ++ } ++ ) ++ ++def booth_already_in_cib(name): ++ """ ++ Each booth instance should be in a cib once maximally. Existence of booth ++ instance in cib detected during creating new one. ++ string name is booth instance name ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_ALREADY_IN_CIB, ++ "booth instance '{name}' is already created as cluster resource", ++ info={ ++ "name": name, ++ } ++ ) ++ ++def booth_not_exists_in_cib(name): ++ """ ++ Remove booth instance from cib required. 
But no such instance found in cib. ++ string name is booth instance name ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_NOT_EXISTS_IN_CIB, ++ "booth instance '{name}' not found in cib", ++ info={ ++ "name": name, ++ } ++ ) ++ ++def booth_config_is_used(name, detail=""): ++ """ ++ Booth config use detected during destroy request. ++ string name is booth instance name ++ string detail provide more details (for example booth instance is used as ++ cluster resource or is started/enabled under systemd) ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_CONFIG_IS_USED, ++ "booth instance '{name}' is used{detail_string}", ++ info={ ++ "name": name, ++ "detail": detail, ++ "detail_string": " {0}".format(detail) if detail else "", ++ } ++ ) ++ ++def booth_multiple_times_in_cib( ++ name, severity=ReportItemSeverity.ERROR ++): ++ """ ++ Each booth instance should be in a cib once maximally. But multiple ++ occurences detected. For example during remove booth instance from cib. ++ Notify user about this fact is required. When operation is forced ++ user should be notified about multiple occurences. ++ string name is booth instance name ++ ReportItemSeverity severit should be ERROR or WARNING (depends on context) ++ is flag for next report processing ++ Because of severity coupling with ReportItem is it specified here. ++ """ ++ return ReportItem( ++ report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB, ++ severity, ++ "found more than one booth instance '{name}' in cib", ++ info={ ++ "name": name, ++ }, ++ forceable=report_codes.FORCE_BOOTH_REMOVE_FROM_CIB ++ if severity == ReportItemSeverity.ERROR else None ++ ) ++ ++ ++def booth_distributing_config(name=None): ++ """ ++ Sending booth config to all nodes in cluster. 
++ ++ name -- name of booth instance ++ """ ++ return ReportItem.info( ++ report_codes.BOOTH_DISTRIBUTING_CONFIG, ++ "Sending booth config{0} to all cluster nodes.".format( ++ " ({name})" if name and name != "booth" else "" ++ ), ++ info={"name": name} ++ ) ++ ++ ++def booth_config_saved(node=None, name_list=None): ++ """ ++ Booth config has been saved on specified node. ++ ++ node -- name of node ++ name_list -- list of names of booth instance ++ """ ++ if name_list: ++ name = ", ".join(name_list) ++ if name == "booth": ++ msg = "Booth config saved." ++ else: ++ msg = "Booth config(s) ({name}) saved." ++ else: ++ msg = "Booth config saved." ++ name = None ++ return ReportItem.info( ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ msg if node is None else "{node}: " + msg, ++ info={ ++ "node": node, ++ "name": name, ++ "name_list": name_list ++ } ++ ) ++ ++ ++def booth_config_unable_to_read( ++ name, severity=ReportItemSeverity.ERROR, forceable=None ++): ++ """ ++ Unable to read from specified booth instance config. ++ ++ name -- name of booth instance ++ severity -- severity of report item ++ forceable -- is this report item forceable? by what category? ++ """ ++ if name and name != "booth": ++ msg = "Unable to read booth config ({name})." ++ else: ++ msg = "Unable to read booth config." ++ return ReportItem( ++ report_codes.BOOTH_CONFIG_READ_ERROR, ++ severity, ++ msg, ++ info={"name": name}, ++ forceable=forceable ++ ) ++ ++ ++def booth_config_not_saved(node, reason, name=None): ++ """ ++ Saving booth config failed on specified node. 
++ ++ node -- node name ++ reason -- reason of failure ++ name -- name of booth instance ++ """ ++ if name and name != "booth": ++ msg = "Unable to save booth config ({name}) on node '{node}': {reason}" ++ else: ++ msg = "Unable to save booth config on node '{node}': {reason}" ++ return ReportItem.error( ++ report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ msg, ++ info={ ++ "node": node, ++ "name": name, ++ "reason": reason ++ } ++ ) ++ ++ ++def booth_sending_local_configs_to_node(node): ++ """ ++ Sending all local booth configs to node ++ ++ node -- node name ++ """ ++ return ReportItem.info( ++ report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, ++ "{node}: Saving booth config(s)...", ++ info={"node": node} ++ ) ++ ++ ++def booth_fetching_config_from_node(node, config=None): ++ if config or config == 'booth': ++ msg = "Fetching booth config from node '{node}'..." ++ else: ++ msg = "Fetching booth config '{config}' from node '{node}'..." ++ return ReportItem.info( ++ report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE, ++ msg, ++ info={ ++ "node": node, ++ "config": config, ++ } ++ ) ++ ++ ++def booth_unsupported_file_location(file): ++ return ReportItem.warning( ++ report_codes.BOOTH_UNSUPORTED_FILE_LOCATION, ++ "skipping file {file}: unsupported file location", ++ info={"file": file} ++ ) ++ ++ ++def booth_daemon_status_error(reason): ++ return ReportItem.error( ++ report_codes.BOOTH_DAEMON_STATUS_ERROR, ++ "unable to get status of booth daemon: {reason}", ++ info={"reason": reason} ++ ) ++ ++ ++def booth_tickets_status_error(reason=None): ++ return ReportItem.error( ++ report_codes.BOOTH_TICKET_STATUS_ERROR, ++ "unable to get status of booth tickets", ++ info={ ++ "reason": reason, ++ } ++ ) ++ ++ ++def booth_peers_status_error(reason=None): ++ return ReportItem.error( ++ report_codes.BOOTH_PEERS_STATUS_ERROR, ++ "unable to get status of booth peers", ++ info={ ++ "reason": reason, ++ } ++ ) ++ ++def booth_cannot_determine_local_site_ip(): ++ """ ++ Some booth operations are performed 
on specific site and requires to specify ++ site ip. When site specification omitted pcs can try determine local ip. ++ But determine local site ip failed. ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP, ++ "cannot determine local site ip, please specify site parameter", ++ info={} ++ ) ++ ++def booth_ticket_operation_failed(operation, reason, site_ip, ticket_name): ++ """ ++ Pcs uses external booth tools for some ticket_name operations. For example ++ grand and revoke. But the external command failed. ++ string operatin determine what was intended perform with ticket_name ++ string reason is taken from external booth command ++ string site_ip specifiy what site had to run the command ++ string ticket_name specify with which ticket had to run the command ++ """ ++ return ReportItem.error( ++ report_codes.BOOTH_TICKET_OPERATION_FAILED, ++ "unable to {operation} booth ticket '{ticket_name}' for site '{site_ip}', " ++ "reason: {reason}" ++ , ++ info={ ++ "operation": operation, ++ "reason": reason, ++ "site_ip": site_ip, ++ "ticket_name": ticket_name, ++ } ++ ) ++ ++def booth_skipping_config(config_file, reason): ++ """ ++ Warning about skipping booth config file. 
++ ++ config_file -- file name of config which is skipped ++ reason -- reason ++ """ ++ return ReportItem.warning( ++ report_codes.BOOTH_SKIPPING_CONFIG, ++ "Skipping config file '{config_file}': {reason}", ++ info={ ++ "config_file": config_file, ++ "reason": reason, ++ } ++ ) ++ ++def booth_cannot_identify_keyfile(severity=ReportItemSeverity.ERROR): ++ return ReportItem( ++ report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE, ++ severity, ++ "cannot identify authfile in booth configuration", ++ info={}, ++ forceable=report_codes.FORCE_BOOTH_DESTROY ++ if severity == ReportItemSeverity.ERROR else None ++ ) +diff --git a/pcs/lib/booth/resource.py b/pcs/lib/booth/resource.py +new file mode 100644 +index 0000000..e793713 +--- /dev/null ++++ b/pcs/lib/booth/resource.py +@@ -0,0 +1,116 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from pcs.lib.cib.tools import find_unique_id ++ ++ ++class BoothNotFoundInCib(Exception): ++ pass ++ ++class BoothMultipleOccurenceFoundInCib(Exception): ++ pass ++ ++def create_resource_id(resources_section, name, suffix): ++ return find_unique_id( ++ resources_section.getroottree(), "booth-{0}-{1}".format(name, suffix) ++ ) ++ ++def get_creator(resource_create): ++ #TODO resource_create is provisional hack until resources are not moved to ++ #lib ++ def create_booth_in_cluster(ip, booth_config_file_path, create_id): ++ ip_id = create_id("ip") ++ booth_id = create_id("service") ++ group_id = create_id("group") ++ ++ resource_create( ++ ra_id=ip_id, ++ ra_type="ocf:heartbeat:IPaddr2", ++ ra_values=["ip={0}".format(ip)], ++ op_values=[], ++ meta_values=[], ++ clone_opts=[], ++ group=group_id, ++ ) ++ resource_create( ++ ra_id=booth_id, ++ ra_type="ocf:pacemaker:booth-site", ++ ra_values=["config={0}".format(booth_config_file_path)], ++ op_values=[], ++ meta_values=[], ++ clone_opts=[], ++ group=group_id, ++ ) ++ return create_booth_in_cluster ++ ++def is_ip_resource(resource_element): ++ 
return resource_element.attrib["type"] == "IPaddr2" ++ ++def find_grouped_ip_element_to_remove(booth_element): ++ if booth_element.getparent().tag != "group": ++ return None ++ ++ group = booth_element.getparent() ++ if len(group) != 2: ++ #when something else in group, ip is not for remove ++ return None ++ for element in group: ++ if is_ip_resource(element): ++ return element ++ return None ++ ++def get_remover(resource_remove): ++ def remove_from_cluster( ++ resources_section, booth_config_file_path, remove_multiple=False ++ ): ++ element_list = find_for_config( ++ resources_section, ++ booth_config_file_path ++ ) ++ if not element_list: ++ raise BoothNotFoundInCib() ++ ++ if len(element_list) > 1 and not remove_multiple: ++ raise BoothMultipleOccurenceFoundInCib() ++ ++ number_of_removed_booth_elements = 0 ++ for element in element_list: ++ ip_resource_to_remove = find_grouped_ip_element_to_remove(element) ++ if ip_resource_to_remove is not None: ++ resource_remove(ip_resource_to_remove.attrib["id"]) ++ resource_remove(element.attrib["id"]) ++ number_of_removed_booth_elements += 1 ++ ++ return number_of_removed_booth_elements ++ ++ return remove_from_cluster ++ ++def find_for_config(resources_section, booth_config_file_path): ++ return resources_section.xpath((""" ++ .//primitive[ ++ @type="booth-site" ++ and ++ instance_attributes[nvpair[@name="config" and @value="{0}"]] ++ ] ++ """).format(booth_config_file_path)) ++ ++def find_bound_ip(resources_section, booth_config_file_path): ++ return resources_section.xpath((""" ++ .//group[ ++ primitive[ ++ @type="booth-site" ++ and ++ instance_attributes[ ++ nvpair[@name="config" and @value="{0}"] ++ ] ++ ] ++ ] ++ /primitive[@type="IPaddr2"] ++ /instance_attributes ++ /nvpair[@name="ip"] ++ /@value ++ """).format(booth_config_file_path)) +diff --git a/pcs/lib/booth/status.py b/pcs/lib/booth/status.py +new file mode 100644 +index 0000000..4b93161 +--- /dev/null ++++ b/pcs/lib/booth/status.py +@@ -0,0 +1,41 @@ ++from 
__future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from pcs import settings ++from pcs.lib.booth import reports ++from pcs.lib.errors import LibraryError ++ ++ ++def get_daemon_status(runner, name=None): ++ cmd = [settings.booth_binary, "status"] ++ if name: ++ cmd += ["-c", name] ++ output, return_value = runner.run(cmd) ++ # 7 means that there is no booth instance running ++ if return_value not in [0, 7]: ++ raise LibraryError(reports.booth_daemon_status_error(output)) ++ return output ++ ++ ++def get_tickets_status(runner, name=None): ++ cmd = [settings.booth_binary, "list"] ++ if name: ++ cmd += ["-c", name] ++ output, return_value = runner.run(cmd) ++ if return_value != 0: ++ raise LibraryError(reports.booth_tickets_status_error(output)) ++ return output ++ ++ ++def get_peers_status(runner, name=None): ++ cmd = [settings.booth_binary, "peers"] ++ if name: ++ cmd += ["-c", name] ++ output, return_value = runner.run(cmd) ++ if return_value != 0: ++ raise LibraryError(reports.booth_peers_status_error(output)) ++ return output +diff --git a/pcs/lib/booth/sync.py b/pcs/lib/booth/sync.py +new file mode 100644 +index 0000000..c9bc30b +--- /dev/null ++++ b/pcs/lib/booth/sync.py +@@ -0,0 +1,208 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os ++import json ++import base64 ++ ++from pcs.common import report_codes ++from pcs.lib import reports as lib_reports ++from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities ++from pcs.lib.external import ( ++ NodeCommunicator, ++ NodeCommunicationException, ++ node_communicator_exception_to_report_item, ++ parallel_nodes_communication_helper, ++) ++from pcs.lib.booth import ( ++ config_files as booth_conf, ++ config_structure, ++ config_parser, ++ reports, ++) ++ ++ ++def _set_config_on_node( ++ communicator, reporter, node, name, config_data, authfile=None, ++ authfile_data=None ++): ++ 
""" ++ Set booth config for instance 'name' on specified node. ++ ++ communicator -- NodeCommunicator ++ reporter -- report processor ++ node -- NodeAddresses ++ name -- name of booth instance ++ config_data -- booth config as string ++ authfile -- path to authfile ++ authfile_data -- authfile content as bytes ++ """ ++ data = { ++ "config": { ++ "name": "{0}.conf".format(name), ++ "data": config_data ++ } ++ } ++ if authfile is not None and authfile_data is not None: ++ data["authfile"] = { ++ "name": os.path.basename(authfile), ++ "data": base64.b64encode(authfile_data).decode("utf-8") ++ } ++ communicator.call_node( ++ node, ++ "remote/booth_set_config", ++ NodeCommunicator.format_data_dict([("data_json", json.dumps(data))]) ++ ) ++ reporter.process(reports.booth_config_saved(node.label, [name])) ++ ++ ++def send_config_to_all_nodes( ++ communicator, reporter, node_list, name, config_data, authfile=None, ++ authfile_data=None, skip_offline=False ++): ++ """ ++ Send config_data of specified booth instance from local node to all nodes in ++ node_list. ++ ++ communicator -- NodeCommunicator ++ reporter -- report processor ++ node_list -- NodeAddressesList ++ name -- name of booth instance ++ config_data -- config_data content as string ++ authfile -- path to authfile ++ authfile_data -- content of authfile as bytes ++ skip_offline -- if True offline nodes will be skipped ++ """ ++ reporter.process(reports.booth_distributing_config(name)) ++ parallel_nodes_communication_helper( ++ _set_config_on_node, ++ [ ++ ( ++ [ ++ communicator, reporter, node, name, config_data, ++ authfile, authfile_data ++ ], ++ {} ++ ) ++ for node in node_list ++ ], ++ reporter, ++ skip_offline ++ ) ++ ++ ++def send_all_config_to_node( ++ communicator, ++ reporter, ++ node, ++ rewrite_existing=False, ++ skip_wrong_config=False ++): ++ """ ++ Send all booth configs from default booth config directory and theri ++ authfiles to specified node. 
++ ++ communicator -- NodeCommunicator ++ reporter -- report processor ++ node -- NodeAddress ++ rewrite_existing -- if True rewrite existing file ++ skip_wrong_config -- if True skip local configs that are unreadable ++ """ ++ config_dict = booth_conf.read_configs(reporter, skip_wrong_config) ++ if not config_dict: ++ return ++ file_list = [] ++ for config, config_data in sorted(config_dict.items()): ++ try: ++ authfile_path = config_structure.get_authfile( ++ config_parser.parse(config_data) ++ ) ++ file_list.append({ ++ "name": config, ++ "data": config_data, ++ "is_authfile": False ++ }) ++ if authfile_path: ++ content = booth_conf.read_authfile(reporter, authfile_path) ++ if not content: ++ continue ++ file_list.append({ ++ "name": os.path.basename(authfile_path), ++ "data": base64.b64encode(content).decode("utf-8"), ++ "is_authfile": True ++ }) ++ except LibraryError: ++ reporter.process(reports.booth_skipping_config( ++ config, "unable to parse config" ++ )) ++ ++ data = [("data_json", json.dumps(file_list))] ++ ++ if rewrite_existing: ++ data.append(("rewrite_existing", "1")) ++ ++ reporter.process(reports.booth_sending_local_configs_to_node(node.label)) ++ try: ++ response = json.loads(communicator.call_node( ++ node, ++ "remote/booth_save_files", ++ NodeCommunicator.format_data_dict(data) ++ )) ++ report_list = [] ++ for file in response["existing"]: ++ report_list.append(lib_reports.file_already_exists( ++ None, ++ file, ++ Severities.WARNING if rewrite_existing else Severities.ERROR, ++ ( ++ None if rewrite_existing ++ else report_codes.FORCE_FILE_OVERWRITE ++ ), ++ node.label ++ )) ++ for file, reason in response["failed"].items(): ++ report_list.append(reports.booth_config_not_saved( ++ node.label, reason, file ++ )) ++ reporter.process_list(report_list) ++ reporter.process( ++ reports.booth_config_saved(node.label, response["saved"]) ++ ) ++ except NodeCommunicationException as e: ++ raise LibraryError(node_communicator_exception_to_report_item(e)) 
++ except (KeyError, ValueError): ++ raise LibraryError(lib_reports.invalid_response_format(node.label)) ++ ++ ++def pull_config_from_node(communicator, node, name): ++ """ ++ Get config of specified booth instance and its authfile if there is one ++ from 'node'. It returns dictionary with format: ++ { ++ "config": { ++ "name": <file name of config>, ++ "data": <content of file> ++ }, ++ "authfile": { ++ "name": <file name of authfile, None if it doesn't exist>, ++ "data": <base64 coded content of authfile> ++ } ++ ++ communicator -- NodeCommunicator ++ node -- NodeAddresses ++ name -- name of booth instance ++ """ ++ try: ++ return json.loads(communicator.call_node( ++ node, ++ "remote/booth_get_config", ++ NodeCommunicator.format_data_dict([("name", name)]) ++ )) ++ except NodeCommunicationException as e: ++ raise LibraryError(node_communicator_exception_to_report_item(e)) ++ except ValueError: ++ raise LibraryError(lib_reports.invalid_response_format(node.label)) +diff --git a/pcs/lib/booth/test/__init__.py b/pcs/lib/booth/test/__init__.py +new file mode 100644 +index 0000000..e69de29 +diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py +new file mode 100644 +index 0000000..a9a40ce +--- /dev/null ++++ b/pcs/lib/booth/test/test_config_exchange.py +@@ -0,0 +1,70 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++from unittest import TestCase ++from pcs.lib.booth import config_structure, config_exchange ++ ++ ++class FromExchangeFormatTest(TestCase): ++ def test_convert_all_supported_items(self): ++ self.assertEqual( ++ [ ++ config_structure.ConfigItem("authfile", "/path/to/auth.file"), ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ config_structure.ConfigItem("site", "2.2.2.2"), ++ config_structure.ConfigItem("arbitrator", "3.3.3.3"), ++ config_structure.ConfigItem("ticket", "TA"), ++ config_structure.ConfigItem("ticket", "TB"), ++ ], ++ 
config_exchange.from_exchange_format( ++ { ++ "sites": ["1.1.1.1", "2.2.2.2"], ++ "arbitrators": ["3.3.3.3"], ++ "tickets": ["TA", "TB"], ++ "authfile": "/path/to/auth.file", ++ }, ++ ) ++ ) ++ ++ ++class GetExchenageFormatTest(TestCase): ++ def test_convert_parsed_config_to_exchange_format(self): ++ self.assertEqual( ++ { ++ "sites": ["1.1.1.1", "2.2.2.2"], ++ "arbitrators": ["3.3.3.3"], ++ "tickets": ["TA", "TB"], ++ "authfile": "/path/to/auth.file", ++ }, ++ config_exchange.to_exchange_format([ ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ config_structure.ConfigItem("site", "2.2.2.2"), ++ config_structure.ConfigItem("arbitrator", "3.3.3.3"), ++ config_structure.ConfigItem("authfile", "/path/to/auth.file"), ++ config_structure.ConfigItem("ticket", "TA"), ++ config_structure.ConfigItem("ticket", "TB", [ ++ config_structure.ConfigItem("timeout", "10") ++ ]), ++ ]) ++ ) ++ ++ def test_convert_parsed_config_to_exchange_format_without_authfile(self): ++ self.assertEqual( ++ { ++ "sites": ["1.1.1.1", "2.2.2.2"], ++ "arbitrators": ["3.3.3.3"], ++ "tickets": ["TA", "TB"], ++ }, ++ config_exchange.to_exchange_format([ ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ config_structure.ConfigItem("site", "2.2.2.2"), ++ config_structure.ConfigItem("arbitrator", "3.3.3.3"), ++ config_structure.ConfigItem("ticket", "TA"), ++ config_structure.ConfigItem("ticket", "TB", [ ++ config_structure.ConfigItem("timeout", "10") ++ ]), ++ ]) ++ ) +diff --git a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py +new file mode 100644 +index 0000000..2d4c3ea +--- /dev/null ++++ b/pcs/lib/booth/test/test_config_files.py +@@ -0,0 +1,272 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from os.path import join ++from unittest import TestCase ++ ++from pcs.common import report_codes, env_file_role_codes as file_roles ++from pcs.lib.booth import config_files ++from pcs.lib.errors import 
ReportItemSeverity as severities ++from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR ++from pcs.test.tools.assertions import assert_raise_library_error, assert_report_item_list_equal ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor ++from pcs.test.tools.pcs_mock import mock ++ ++def patch_config_files(target, *args, **kwargs): ++ return mock.patch( ++ "pcs.lib.booth.config_files.{0}".format(target), *args, **kwargs ++ ) ++ ++@mock.patch("os.listdir") ++@mock.patch("os.path.isfile") ++class GetAllConfigsFileNamesTest(TestCase): ++ def test_success(self, mock_is_file, mock_listdir): ++ def mock_is_file_fn(file_name): ++ if file_name in ["dir.cong", "dir"]: ++ return False ++ elif file_name in [ ++ "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf" ++ ]: ++ return True ++ else: ++ raise AssertionError("unexpected input") ++ ++ mock_is_file.side_effect = mock_is_file_fn ++ mock_listdir.return_value = [ ++ "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf", ++ "dir.cong", "dir" ++ ] ++ self.assertEqual( ++ ["name2.conf", "name.conf.conf", "name3.conf"], ++ config_files.get_all_configs_file_names() ++ ) ++ mock_listdir.assert_called_once_with(BOOTH_CONFIG_DIR) ++ ++ ++class ReadConfigTest(TestCase): ++ def test_success(self): ++ self.maxDiff = None ++ mock_open = mock.mock_open(read_data="config content") ++ with patch_config_files("open", mock_open, create=True): ++ self.assertEqual( ++ "config content", ++ config_files._read_config("my-file.conf") ++ ) ++ ++ self.assertEqual( ++ [ ++ mock.call(join(BOOTH_CONFIG_DIR, "my-file.conf"), "r"), ++ mock.call().__enter__(), ++ mock.call().read(), ++ mock.call().__exit__(None, None, None) ++ ], ++ mock_open.mock_calls ++ ) ++ ++ ++@patch_config_files("_read_config") ++@patch_config_files("get_all_configs_file_names") ++class ReadConfigsTest(TestCase): ++ def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() ++ ++ def test_success(self, mock_get_configs, 
mock_read): ++ def _mock_read_cfg(file): ++ if file == "name1.conf": ++ return "config1" ++ elif file == "name2.conf": ++ return "config2" ++ elif file == "name3.conf": ++ return "config3" ++ else: ++ raise AssertionError("unexpected input: {0}".format(file)) ++ mock_get_configs.return_value = [ ++ "name1.conf", "name2.conf", "name3.conf" ++ ] ++ mock_read.side_effect = _mock_read_cfg ++ ++ self.assertEqual( ++ { ++ "name1.conf": "config1", ++ "name2.conf": "config2", ++ "name3.conf": "config3" ++ }, ++ config_files.read_configs(self.mock_reporter) ++ ) ++ ++ mock_get_configs.assert_called_once_with() ++ self.assertEqual(3, mock_read.call_count) ++ mock_read.assert_has_calls([ ++ mock.call("name1.conf"), ++ mock.call("name2.conf"), ++ mock.call("name3.conf") ++ ]) ++ self.assertEqual(0, len(self.mock_reporter.report_item_list)) ++ ++ def test_skip_failed(self, mock_get_configs, mock_read): ++ def _mock_read_cfg(file): ++ if file in ["name1.conf", "name3.conf"]: ++ raise EnvironmentError() ++ elif file == "name2.conf": ++ return "config2" ++ else: ++ raise AssertionError("unexpected input: {0}".format(file)) ++ ++ mock_get_configs.return_value = [ ++ "name1.conf", "name2.conf", "name3.conf" ++ ] ++ mock_read.side_effect = _mock_read_cfg ++ ++ self.assertEqual( ++ {"name2.conf": "config2"}, ++ config_files.read_configs(self.mock_reporter, True) ++ ) ++ mock_get_configs.assert_called_once_with() ++ self.assertEqual(3, mock_read.call_count) ++ mock_read.assert_has_calls([ ++ mock.call("name1.conf"), ++ mock.call("name2.conf"), ++ mock.call("name3.conf") ++ ]) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ severities.WARNING, ++ report_codes.BOOTH_CONFIG_READ_ERROR, ++ {"name": "name1.conf"} ++ ), ++ ( ++ severities.WARNING, ++ report_codes.BOOTH_CONFIG_READ_ERROR, ++ {"name": "name3.conf"} ++ ) ++ ] ++ ) ++ ++ def test_do_not_skip_failed(self, mock_get_configs, mock_read): ++ def _mock_read_cfg(file): ++ if file in ["name1.conf", 
"name3.conf"]: ++ raise EnvironmentError() ++ elif file == "name2.conf": ++ return "config2" ++ else: ++ raise AssertionError("unexpected input: {0}".format(file)) ++ ++ mock_get_configs.return_value = [ ++ "name1.conf", "name2.conf", "name3.conf" ++ ] ++ mock_read.side_effect = _mock_read_cfg ++ ++ assert_raise_library_error( ++ lambda: config_files.read_configs(self.mock_reporter), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_CONFIG_READ_ERROR, ++ {"name": "name1.conf"}, ++ report_codes.SKIP_UNREADABLE_CONFIG ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_CONFIG_READ_ERROR, ++ {"name": "name3.conf"}, ++ report_codes.SKIP_UNREADABLE_CONFIG ++ ) ++ ) ++ mock_get_configs.assert_called_once_with() ++ self.assertEqual(3, mock_read.call_count) ++ mock_read.assert_has_calls([ ++ mock.call("name1.conf"), ++ mock.call("name2.conf"), ++ mock.call("name3.conf") ++ ]) ++ self.assertEqual(2, len(self.mock_reporter.report_item_list)) ++ ++ ++class ReadAuthfileTest(TestCase): ++ def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.maxDiff = None ++ ++ def test_success(self): ++ path = join(BOOTH_CONFIG_DIR, "file.key") ++ mock_open = mock.mock_open(read_data="key") ++ ++ with patch_config_files("open", mock_open, create=True): ++ self.assertEqual( ++ "key", config_files.read_authfile(self.mock_reporter, path) ++ ) ++ ++ self.assertEqual( ++ [ ++ mock.call(path, "rb"), ++ mock.call().__enter__(), ++ mock.call().read(), ++ mock.call().__exit__(None, None, None) ++ ], ++ mock_open.mock_calls ++ ) ++ self.assertEqual(0, len(self.mock_reporter.report_item_list)) ++ ++ def test_path_none(self): ++ self.assertTrue( ++ config_files.read_authfile(self.mock_reporter, None) is None ++ ) ++ self.assertEqual(0, len(self.mock_reporter.report_item_list)) ++ ++ def test_invalid_path(self): ++ path = "/not/etc/booth/booth.key" ++ self.assertTrue( ++ config_files.read_authfile(self.mock_reporter, path) is None ++ ) ++ assert_report_item_list_equal( ++ 
self.mock_reporter.report_item_list, ++ [( ++ severities.WARNING, ++ report_codes.BOOTH_UNSUPORTED_FILE_LOCATION, ++ {"file": path} ++ )] ++ ) ++ ++ def test_not_abs_path(self): ++ path = "/etc/booth/../booth.key" ++ self.assertTrue( ++ config_files.read_authfile(self.mock_reporter, path) is None ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ severities.WARNING, ++ report_codes.BOOTH_UNSUPORTED_FILE_LOCATION, ++ {"file": path} ++ )] ++ ) ++ ++ @patch_config_files("format_environment_error", return_value="reason") ++ def test_read_failure(self, _): ++ path = join(BOOTH_CONFIG_DIR, "file.key") ++ mock_open = mock.mock_open() ++ mock_open().read.side_effect = EnvironmentError() ++ ++ with patch_config_files("open", mock_open, create=True): ++ return_value = config_files.read_authfile(self.mock_reporter, path) ++ ++ self.assertTrue(return_value is None) ++ ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ severities.WARNING, ++ report_codes.FILE_IO_ERROR, ++ { ++ "file_role": file_roles.BOOTH_KEY, ++ "file_path": path, ++ "reason": "reason", ++ "operation": "read", ++ } ++ )] ++ ) +diff --git a/pcs/lib/booth/test/test_config_parser.py b/pcs/lib/booth/test/test_config_parser.py +new file mode 100644 +index 0000000..684fc79 +--- /dev/null ++++ b/pcs/lib/booth/test/test_config_parser.py +@@ -0,0 +1,169 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from pcs.common import report_codes ++from pcs.lib.booth import config_parser ++from pcs.lib.booth.config_structure import ConfigItem ++from pcs.lib.errors import ReportItemSeverity as severities ++from pcs.test.tools.assertions import assert_raise_library_error ++from pcs.test.tools.pcs_unittest import TestCase ++ ++ ++class BuildTest(TestCase): ++ def test_build_file_content_from_parsed_structure(self): ++ self.assertEqual( ++ "\n".join([ ++ "authfile = /path/to/auth.file", ++ "site = 
1.1.1.1", ++ "site = 2.2.2.2", ++ "arbitrator = 3.3.3.3", ++ 'ticket = "TA"', ++ 'ticket = "TB"', ++ " timeout = 10", ++ ]), ++ config_parser.build([ ++ ConfigItem("authfile", "/path/to/auth.file"), ++ ConfigItem("site", "1.1.1.1"), ++ ConfigItem("site", "2.2.2.2"), ++ ConfigItem("arbitrator", "3.3.3.3"), ++ ConfigItem("ticket", "TA"), ++ ConfigItem("ticket", "TB", [ ++ ConfigItem("timeout", "10") ++ ]), ++ ]) ++ ) ++ ++ ++class OrganizeLinesTest(TestCase): ++ def test_move_non_ticket_config_keys_above_tickets(self): ++ self.assertEqual( ++ [ ++ ConfigItem("site", "1.1.1.1"), ++ ConfigItem('site', '2.2.2.2'), ++ ConfigItem('arbitrator', '3.3.3.3'), ++ ConfigItem("ticket", "TA"), ++ ], ++ config_parser.organize_lines([ ++ ("site", "1.1.1.1"), ++ ("ticket", "TA"), ++ ('site', '2.2.2.2'), ++ ('arbitrator', '3.3.3.3'), ++ ]) ++ ) ++ ++ def test_use_ticket_key_as_ticket_detail(self): ++ self.maxDiff = None ++ self.assertEqual( ++ [ ++ ConfigItem("site", "1.1.1.1"), ++ ConfigItem('expire', '300'), ++ ConfigItem('site', '2.2.2.2'), ++ ConfigItem('arbitrator', '3.3.3.3'), ++ ConfigItem("ticket", "TA", [ ++ ConfigItem("timeout", "10"), ++ ConfigItem('--nonexistent', 'value'), ++ ConfigItem("expire", "300"), ++ ]), ++ ConfigItem("ticket", "TB", [ ++ ConfigItem("timeout", "20"), ++ ConfigItem("renewal-freq", "40"), ++ ]), ++ ], ++ config_parser.organize_lines([ ++ ("site", "1.1.1.1"), ++ ("expire", "300"), # out of ticket content is kept global ++ ("ticket", "TA"), ++ ("site", "2.2.2.2"), # move to global ++ ("timeout", "10"), ++ ("--nonexistent", "value"), # no global is kept under ticket ++ ("expire", "300"), ++ ("ticket", "TB"), ++ ('arbitrator', '3.3.3.3'), ++ ("timeout", "20"), ++ ("renewal-freq", "40"), ++ ]) ++ ) ++ ++ ++class ParseRawLinesTest(TestCase): ++ def test_parse_simple_correct_lines(self): ++ self.assertEqual( ++ [ ++ ("site", "1.1.1.1"), ++ ('site', '2.2.2.2'), ++ ('arbitrator', '3.3.3.3'), ++ ('syntactically_correct', 'nonsense'), ++ ('line-with', 
'hash#literal'), ++ ], ++ config_parser.parse_to_raw_lines("\n".join([ ++ "site = 1.1.1.1", ++ " site = 2.2.2.2 ", ++ "arbitrator=3.3.3.3", ++ "syntactically_correct = nonsense", ++ "line-with = hash#literal", ++ ])) ++ ) ++ ++ def test_parse_lines_with_whole_line_comment(self): ++ self.assertEqual( ++ [("site", "1.1.1.1")], ++ config_parser.parse_to_raw_lines("\n".join([ ++ " # some comment", ++ "site = 1.1.1.1", ++ ])) ++ ) ++ ++ def test_skip_empty_lines(self): ++ self.assertEqual( ++ [("site", "1.1.1.1")], ++ config_parser.parse_to_raw_lines("\n".join([ ++ " ", ++ "site = 1.1.1.1", ++ ])) ++ ) ++ ++ def test_raises_when_unexpected_lines_appear(self): ++ invalid_line_list = [ ++ "first invalid line", ++ "second = 'invalid line' something else #comment", ++ "third = 'invalid line 'something#'#", ++ ] ++ line_list = ["site = 1.1.1.1"] + invalid_line_list ++ with self.assertRaises(config_parser.InvalidLines) as context_manager: ++ config_parser.parse_to_raw_lines("\n".join(line_list)) ++ self.assertEqual(context_manager.exception.args[0], invalid_line_list) ++ ++ def test_parse_lines_finishing_with_comment(self): ++ self.assertEqual( ++ [("site", "1.1.1.1")], ++ config_parser.parse_to_raw_lines("\n".join([ ++ "site = '1.1.1.1' #comment", ++ ])) ++ ) ++ ++class ParseTest(TestCase): ++ def test_raises_when_invalid_lines_appear(self): ++ invalid_line_list = [ ++ "first invalid line", ++ "second = 'invalid line' something else #comment" ++ ] ++ line_list = ["site = 1.1.1.1"] + invalid_line_list ++ assert_raise_library_error( ++ lambda: ++ config_parser.parse("\n".join(line_list)) ++ , ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_CONFIG_UNEXPECTED_LINES, ++ { ++ "line_list": invalid_line_list, ++ }, ++ ), ++ ) ++ ++ def test_do_not_raises_when_no_invalid_liens_there(self): ++ config_parser.parse("site = 1.1.1.1") +diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py +new file mode 100644 +index 0000000..27faca5 +--- 
/dev/null ++++ b/pcs/lib/booth/test/test_config_structure.py +@@ -0,0 +1,224 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from pcs.common import report_codes ++from pcs.lib.booth import config_structure ++from pcs.lib.errors import ReportItemSeverity as severities ++from pcs.test.tools.assertions import assert_raise_library_error ++from pcs.test.tools.pcs_mock import mock ++ ++ ++class ValidateTicketExistsTest(TestCase): ++ def test_raises_on_duplicate_ticket(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_ticket_exists( ++ [config_structure.ConfigItem("ticket", "B")], "A" ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_TICKET_DOES_NOT_EXIST, ++ { ++ "ticket_name": "A", ++ }, ++ ), ++ ) ++ ++class ValidateTicketUniqueTest(TestCase): ++ def test_raises_on_duplicate_ticket(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_ticket_unique( ++ [config_structure.ConfigItem("ticket", "A")], "A" ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_TICKET_DUPLICATE, ++ { ++ "ticket_name": "A", ++ }, ++ ), ++ ) ++ ++ def test_do_not_raises_when_no_duplicated_ticket(self): ++ config_structure.validate_ticket_unique([], "A") ++ ++class TicketExistsTest(TestCase): ++ def test_returns_true_if_ticket_in_structure(self): ++ self.assertTrue(config_structure.ticket_exists( ++ [config_structure.ConfigItem("ticket", "A")], "A" ++ )) ++ ++ def test_returns_false_if_ticket_in_structure(self): ++ self.assertFalse(config_structure.ticket_exists( ++ [config_structure.ConfigItem("ticket", "A")], "B" ++ )) ++ ++class ValidateTicketNameTest(TestCase): ++ def test_accept_valid_ticket_name(self): ++ config_structure.validate_ticket_name("abc") ++ ++ def test_refuse_bad_ticket_name(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_ticket_name("@ticket"), ++ ( ++ severities.ERROR, ++ 
report_codes.BOOTH_TICKET_NAME_INVALID, ++ { ++ "ticket_name": "@ticket", ++ }, ++ ), ++ ) ++ ++class ValidatePeersTest(TestCase): ++ def test_do_no_raises_on_correct_args(self): ++ config_structure.validate_peers( ++ site_list=["1.1.1.1", "2.2.2.2"], ++ arbitrator_list=["3.3.3.3"] ++ ) ++ ++ def test_refuse_less_than_2_sites(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_peers( ++ site_list=["1.1.1.1"], ++ arbitrator_list=["3.3.3.3", "4.4.4.4"] ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_LACK_OF_SITES, ++ { ++ "sites": ["1.1.1.1"], ++ } ++ ), ++ ) ++ ++ def test_refuse_even_number_peers(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_peers( ++ site_list=["1.1.1.1", "2.2.2.2"], ++ arbitrator_list=[] ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_EVEN_PEERS_NUM, ++ { ++ "number": 2, ++ } ++ ), ++ ) ++ ++ def test_refuse_address_duplication(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_peers( ++ site_list=["1.1.1.1", "1.1.1.1", "1.1.1.1"], ++ arbitrator_list=["3.3.3.3", "4.4.4.4"] ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_ADDRESS_DUPLICATION, ++ { ++ "addresses": set(["1.1.1.1"]), ++ } ++ ), ++ ) ++ ++ def test_refuse_problem_combination(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_peers( ++ site_list=["1.1.1.1"], ++ arbitrator_list=["1.1.1.1"] ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_LACK_OF_SITES, ++ { ++ "sites": ["1.1.1.1"], ++ } ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_EVEN_PEERS_NUM, ++ { ++ "number": 2, ++ } ++ ), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_ADDRESS_DUPLICATION, ++ { ++ "addresses": set(["1.1.1.1"]), ++ } ++ ), ++ ) ++ ++class RemoveTicketTest(TestCase): ++ @mock.patch("pcs.lib.booth.config_structure.validate_ticket_exists") ++ def test_successfully_remove_ticket(self, mock_validate_ticket_exists): ++ configuration = [ ++ config_structure.ConfigItem("ticket", "some-ticket"), 
++ config_structure.ConfigItem("ticket", "deprecated-ticket"), ++ ] ++ self.assertEqual( ++ config_structure.remove_ticket(configuration, "deprecated-ticket"), ++ [ ++ config_structure.ConfigItem("ticket", "some-ticket"), ++ ] ++ ) ++ mock_validate_ticket_exists.assert_called_once_with( ++ configuration, ++ "deprecated-ticket" ++ ) ++ ++class AddTicketTest(TestCase): ++ @mock.patch("pcs.lib.booth.config_structure.validate_ticket_unique") ++ @mock.patch("pcs.lib.booth.config_structure.validate_ticket_name") ++ def test_successfully_add_ticket( ++ self, mock_validate_name, mock_validate_uniq ++ ): ++ configuration = [ ++ config_structure.ConfigItem("ticket", "some-ticket"), ++ ] ++ self.assertEqual( ++ config_structure.add_ticket(configuration, "new-ticket"), ++ [ ++ config_structure.ConfigItem("ticket", "some-ticket"), ++ config_structure.ConfigItem("ticket", "new-ticket"), ++ ], ++ ) ++ ++ mock_validate_name.assert_called_once_with("new-ticket") ++ mock_validate_uniq.assert_called_once_with(configuration, "new-ticket") ++ ++class SetAuthfileTest(TestCase): ++ def test_add_authfile(self): ++ self.assertEqual( ++ [ ++ config_structure.ConfigItem("authfile", "/path/to/auth.file"), ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ ], ++ config_structure.set_authfile( ++ [ ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ ], ++ "/path/to/auth.file" ++ ) ++ ) ++ def test_reset_authfile(self): ++ self.assertEqual( ++ [ ++ config_structure.ConfigItem("authfile", "/path/to/auth.file"), ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ ], ++ config_structure.set_authfile( ++ [ ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ config_structure.ConfigItem("authfile", "/old/path/to/auth1.file"), ++ config_structure.ConfigItem("authfile", "/old/path/to/auth2.file"), ++ ], ++ "/path/to/auth.file" ++ ) ++ ) +diff --git a/pcs/lib/booth/test/test_env.py b/pcs/lib/booth/test/test_env.py +new file mode 100644 +index 0000000..77e0944 +--- /dev/null ++++ 
b/pcs/lib/booth/test/test_env.py +@@ -0,0 +1,228 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import grp ++import os ++import pwd ++from unittest import TestCase ++ ++from pcs import settings ++from pcs.common import report_codes ++from pcs.lib.booth import env ++from pcs.lib.errors import ReportItemSeverity as severities ++from pcs.test.tools.assertions import assert_raise_library_error ++from pcs.test.tools.misc import get_test_resource as rc ++from pcs.test.tools.pcs_mock import mock ++ ++def patch_env(target, *args, **kwargs): ++ return mock.patch( ++ "pcs.lib.booth.env.{0}".format(target), *args, **kwargs ++ ) ++ ++class GetConfigFileNameTest(TestCase): ++ @patch_env("os.path.exists") ++ def test_refuse_when_name_starts_with_slash(self, mock_path_exists): ++ mock_path_exists.return_value = True ++ assert_raise_library_error( ++ lambda: env.get_config_file_name("/booth"), ++ ( ++ severities.ERROR, ++ report_codes.BOOTH_INVALID_NAME, ++ { ++ "name": "/booth", ++ "reason": "contains illegal character '/'", ++ } ++ ), ++ ) ++ ++class BoothEnvTest(TestCase): ++ @patch_env("RealFile") ++ def test_get_content_from_file(self, mock_real_file): ++ mock_real_file.return_value = mock.MagicMock( ++ read=mock.MagicMock(return_value="content") ++ ) ++ self.assertEqual( ++ "content", ++ env.BoothEnv("report processor", env_data={"name": "booth"}) ++ .get_config_content() ++ ) ++ ++ @patch_env("set_keyfile_access") ++ @patch_env("RealFile") ++ def test_create_config(self, mock_real_file, mock_set_keyfile_access): ++ mock_file = mock.MagicMock( ++ assert_no_conflict_with_existing=mock.MagicMock(), ++ write=mock.MagicMock(), ++ ) ++ mock_real_file.return_value = mock_file ++ ++ ++ env.BoothEnv( ++ "report processor", ++ env_data={"name": "booth"} ++ ).create_config("a", can_overwrite_existing=True) ++ ++ self.assertEqual(mock_file.assert_no_conflict_with_existing.mock_calls,[ ++ mock.call('report processor', 
True), ++ ]) ++ self.assertEqual(mock_file.write.mock_calls, [mock.call('a')]) ++ ++ @patch_env("RealFile") ++ def test_push_config(self, mock_real_file): ++ mock_file = mock.MagicMock( ++ assert_no_conflict_with_existing=mock.MagicMock(), ++ write=mock.MagicMock(), ++ ) ++ mock_real_file.return_value = mock_file ++ env.BoothEnv( ++ "report processor", ++ env_data={"name": "booth"} ++ ).push_config("a") ++ mock_file.write.assert_called_once_with("a") ++ ++ ++ ++ def test_export_config_file_when_was_present_in_env_data(self): ++ self.assertEqual( ++ env.BoothEnv( ++ "report processor", ++ { ++ "name": "booth-name", ++ "config_file": { ++ "content": "a\nb", ++ }, ++ "key_file": { ++ "content": "secure", ++ }, ++ "key_path": "/path/to/file.key", ++ } ++ ).export(), ++ { ++ "config_file": { ++ "content": "a\nb", ++ "can_overwrite_existing_file": False, ++ "no_existing_file_expected": False, ++ "is_binary": False, ++ }, ++ "key_file": { ++ "content": "secure", ++ "can_overwrite_existing_file": False, ++ "no_existing_file_expected": False, ++ "is_binary": False, ++ }, ++ } ++ ) ++ ++ def test_do_not_export_config_file_when_no_provided(self): ++ self.assertEqual( ++ env.BoothEnv("report processor", {"name": "booth"}).export(), ++ {} ++ ) ++ ++class SetKeyfileAccessTest(TestCase): ++ def test_set_desired_file_access(self): ++ #setup ++ file_path = rc("temp-keyfile") ++ if os.path.exists(file_path): ++ os.remove(file_path) ++ with open(file_path, "w") as file: ++ file.write("content") ++ ++ #check assumptions ++ stat = os.stat(file_path) ++ self.assertNotEqual('600', oct(stat.st_mode)[-3:]) ++ current_user = pwd.getpwuid(os.getuid())[0] ++ if current_user != settings.pacemaker_uname: ++ file_user = pwd.getpwuid(stat.st_uid)[0] ++ self.assertNotEqual(file_user, settings.pacemaker_uname) ++ current_group = grp.getgrgid(os.getgid())[0] ++ if current_group != settings.pacemaker_gname: ++ file_group = grp.getgrgid(stat.st_gid)[0] ++ self.assertNotEqual(file_group, 
settings.pacemaker_gname) ++ ++ #run tested method ++ env.set_keyfile_access(file_path) ++ ++ #check ++ stat = os.stat(file_path) ++ self.assertEqual('600', oct(stat.st_mode)[-3:]) ++ ++ file_user = pwd.getpwuid(stat.st_uid)[0] ++ self.assertEqual(file_user, settings.pacemaker_uname) ++ ++ file_group = grp.getgrgid(stat.st_gid)[0] ++ self.assertEqual(file_group, settings.pacemaker_gname) ++ ++ @patch_env("pwd.getpwnam", mock.MagicMock(side_effect=KeyError)) ++ @patch_env("settings.pacemaker_uname", "some-user") ++ def test_raises_when_cannot_get_uid(self): ++ assert_raise_library_error( ++ lambda: env.set_keyfile_access("/booth"), ++ ( ++ severities.ERROR, ++ report_codes.UNABLE_TO_DETERMINE_USER_UID, ++ { ++ "user": "some-user", ++ } ++ ), ++ ) ++ ++ @patch_env("grp.getgrnam", mock.MagicMock(side_effect=KeyError)) ++ @patch_env("pwd.getpwnam", mock.MagicMock()) ++ @patch_env("settings.pacemaker_gname", "some-group") ++ def test_raises_when_cannot_get_gid(self): ++ assert_raise_library_error( ++ lambda: env.set_keyfile_access("/booth"), ++ ( ++ severities.ERROR, ++ report_codes.UNABLE_TO_DETERMINE_GROUP_GID, ++ { ++ "group": "some-group", ++ } ++ ), ++ ) ++ ++ @patch_env("format_environment_error", mock.Mock(return_value="err")) ++ @patch_env("os.chown", mock.MagicMock(side_effect=EnvironmentError())) ++ @patch_env("grp.getgrnam", mock.MagicMock()) ++ @patch_env("pwd.getpwnam", mock.MagicMock()) ++ @patch_env("settings.pacemaker_gname", "some-group") ++ def test_raises_when_cannot_chown(self): ++ assert_raise_library_error( ++ lambda: env.set_keyfile_access("/booth"), ++ ( ++ severities.ERROR, ++ report_codes.FILE_IO_ERROR, ++ { ++ 'reason': 'err', ++ 'file_role': u'BOOTH_KEY', ++ 'file_path': '/booth', ++ 'operation': u'chown', ++ } ++ ), ++ ) ++ ++ @patch_env("format_environment_error", mock.Mock(return_value="err")) ++ @patch_env("os.chmod", mock.MagicMock(side_effect=EnvironmentError())) ++ @patch_env("os.chown", mock.MagicMock()) ++ @patch_env("grp.getgrnam", 
mock.MagicMock()) ++ @patch_env("pwd.getpwnam", mock.MagicMock()) ++ @patch_env("settings.pacemaker_gname", "some-group") ++ def test_raises_when_cannot_chmod(self): ++ assert_raise_library_error( ++ lambda: env.set_keyfile_access("/booth"), ++ ( ++ severities.ERROR, ++ report_codes.FILE_IO_ERROR, ++ { ++ 'reason': 'err', ++ 'file_role': u'BOOTH_KEY', ++ 'file_path': '/booth', ++ 'operation': u'chmod', ++ } ++ ), ++ ) +diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py +new file mode 100644 +index 0000000..440ddde +--- /dev/null ++++ b/pcs/lib/booth/test/test_resource.py +@@ -0,0 +1,203 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from lxml import etree ++ ++import pcs.lib.booth.resource as booth_resource ++from pcs.test.tools.pcs_mock import mock ++ ++ ++def fixture_resources_with_booth(booth_config_file_path): ++ return etree.fromstring(''' ++ <resources> ++ <primitive type="booth-site"> ++ <instance_attributes> ++ <nvpair name="config" value="{0}"/> ++ </instance_attributes> ++ </primitive> ++ </resources> ++ '''.format(booth_config_file_path)) ++ ++def fixture_booth_element(id, booth_config_file_path): ++ return etree.fromstring(''' ++ <primitive id="{0}" type="booth-site"> ++ <instance_attributes> ++ <nvpair name="config" value="{1}"/> ++ </instance_attributes> ++ </primitive> ++ '''.format(id, booth_config_file_path)) ++ ++def fixture_ip_element(id, ip=""): ++ return etree.fromstring(''' ++ <primitive id="{0}" type="IPaddr2"> ++ <instance_attributes id="{0}-ia"> ++ <nvpair ++ id="booth-booth-{0}-ia-ip" ++ name="ip" ++ value="{1}" ++ /> ++ </instance_attributes> ++ </primitive> ++ '''.format(id, ip)) ++ ++class CreateResourceIdTest(TestCase): ++ @mock.patch("pcs.lib.booth.resource.find_unique_id") ++ def test_return_new_uinq_id(self, mock_find_unique_id): ++ resources_section = etree.fromstring('''<resources/>''') ++ 
mock_find_unique_id.side_effect = ( ++ lambda resources_section, id: "{0}-n".format(id) ++ ) ++ self.assertEqual( ++ "booth-some-name-ip-n", ++ booth_resource.create_resource_id( ++ resources_section, "some-name", "ip" ++ ) ++ ) ++ ++class FindBoothResourceElementsTest(TestCase): ++ def test_returns_empty_list_when_no_matching_booth_element(self): ++ self.assertEqual([], booth_resource.find_for_config( ++ fixture_resources_with_booth("/ANOTHER/PATH/TO/CONF"), ++ "/PATH/TO/CONF" ++ )) ++ ++ ++ def test_returns_all_found_resource_elements(self): ++ resources = etree.fromstring('<resources/>') ++ first = fixture_booth_element("first", "/PATH/TO/CONF") ++ second = fixture_booth_element("second", "/ANOTHER/PATH/TO/CONF") ++ third = fixture_booth_element("third", "/PATH/TO/CONF") ++ for element in [first, second,third]: ++ resources.append(element) ++ ++ self.assertEqual( ++ [first, third], ++ booth_resource.find_for_config( ++ resources, ++ "/PATH/TO/CONF" ++ ) ++ ) ++ ++class RemoveFromClusterTest(TestCase): ++ def call(self, resources_section, remove_multiple=False): ++ mock_resource_remove = mock.Mock() ++ num_of_removed_booth_resources = booth_resource.get_remover( ++ mock_resource_remove ++ )( ++ resources_section, ++ "/PATH/TO/CONF", ++ remove_multiple, ++ ) ++ return ( ++ mock_resource_remove, ++ num_of_removed_booth_resources ++ ) ++ ++ def fixture_resources_including_two_booths(self): ++ resources_section = etree.fromstring('<resources/>') ++ first = fixture_booth_element("first", "/PATH/TO/CONF") ++ second = fixture_booth_element("second", "/PATH/TO/CONF") ++ resources_section.append(first) ++ resources_section.append(second) ++ return resources_section ++ ++ def test_raises_when_booth_resource_not_found(self): ++ self.assertRaises( ++ booth_resource.BoothNotFoundInCib, ++ lambda: self.call(etree.fromstring('<resources/>')), ++ ) ++ ++ def test_raises_when_more_booth_resources_found(self): ++ resources_section = self.fixture_resources_including_two_booths() ++ 
self.assertRaises( ++ booth_resource.BoothMultipleOccurenceFoundInCib, ++ lambda: self.call(resources_section), ++ ) ++ ++ def test_returns_number_of_removed_elements(self): ++ resources_section = self.fixture_resources_including_two_booths() ++ mock_resource_remove, num_of_removed_booth_resources = self.call( ++ resources_section, ++ remove_multiple=True ++ ) ++ self.assertEqual(num_of_removed_booth_resources, 2) ++ self.assertEqual( ++ mock_resource_remove.mock_calls, [ ++ mock.call('first'), ++ mock.call('second'), ++ ] ++ ) ++ ++ def test_remove_ip_when_is_only_booth_sibling_in_group(self): ++ resources_section = etree.fromstring(''' ++ <resources> ++ <group> ++ <primitive id="ip" type="IPaddr2"/> ++ <primitive id="booth" type="booth-site"> ++ <instance_attributes> ++ <nvpair name="config" value="/PATH/TO/CONF"/> ++ </instance_attributes> ++ </primitive> ++ </group> ++ </resources> ++ ''') ++ ++ mock_resource_remove, _ = self.call( ++ resources_section, ++ remove_multiple=True ++ ) ++ self.assertEqual( ++ mock_resource_remove.mock_calls, [ ++ mock.call('ip'), ++ mock.call('booth'), ++ ] ++ ) ++ ++ ++class FindBindedIpTest(TestCase): ++ def fixture_resource_section(self, ip_element_list): ++ resources_section = etree.fromstring('<resources/>') ++ group = etree.SubElement(resources_section, "group") ++ group.append(fixture_booth_element("booth1", "/PATH/TO/CONF")) ++ for ip_element in ip_element_list: ++ group.append(ip_element) ++ return resources_section ++ ++ ++ def test_returns_None_when_no_ip(self): ++ self.assertEqual( ++ [], ++ booth_resource.find_bound_ip( ++ self.fixture_resource_section([]), ++ "/PATH/TO/CONF", ++ ) ++ ) ++ ++ def test_returns_ip_when_correctly_found(self): ++ self.assertEqual( ++ ["192.168.122.31"], ++ booth_resource.find_bound_ip( ++ self.fixture_resource_section([ ++ fixture_ip_element("ip1", "192.168.122.31"), ++ ]), ++ "/PATH/TO/CONF", ++ ) ++ ) ++ ++ def test_returns_None_when_more_ip(self): ++ self.assertEqual( ++ 
["192.168.122.31", "192.168.122.32"], ++ booth_resource.find_bound_ip( ++ self.fixture_resource_section([ ++ fixture_ip_element("ip1", "192.168.122.31"), ++ fixture_ip_element("ip2", "192.168.122.32"), ++ ]), ++ "/PATH/TO/CONF", ++ ) ++ ) +diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py +new file mode 100644 +index 0000000..0ea837a +--- /dev/null ++++ b/pcs/lib/booth/test/test_status.py +@@ -0,0 +1,137 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++try: ++ # python 2 ++ #pylint: disable=unused-import ++ from urlparse import parse_qs as url_decode ++except ImportError: ++ # python 3 ++ from urllib.parse import parse_qs as url_decode ++ ++from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.assertions import assert_raise_library_error ++ ++from pcs import settings ++from pcs.common import report_codes ++from pcs.lib.errors import ReportItemSeverity as Severities ++from pcs.lib.external import CommandRunner ++import pcs.lib.booth.status as lib ++ ++ ++class GetDaemonStatusTest(TestCase): ++ def setUp(self): ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ ++ def test_no_name(self): ++ self.mock_run.run.return_value = ("output", 0) ++ self.assertEqual("output", lib.get_daemon_status(self.mock_run)) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "status"] ++ ) ++ ++ def test_with_name(self): ++ self.mock_run.run.return_value = ("output", 0) ++ self.assertEqual("output", lib.get_daemon_status(self.mock_run, "name")) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "status", "-c", "name"] ++ ) ++ ++ def test_daemon_not_running(self): ++ self.mock_run.run.return_value = ("", 7) ++ self.assertEqual("", lib.get_daemon_status(self.mock_run)) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "status"] ++ ) ++ ++ def test_failure(self): ++ 
self.mock_run.run.return_value = ("out", 1) ++ assert_raise_library_error( ++ lambda: lib.get_daemon_status(self.mock_run), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_DAEMON_STATUS_ERROR, ++ {"reason": "out"} ++ ) ++ ) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "status"] ++ ) ++ ++ ++class GetTicketsStatusTest(TestCase): ++ def setUp(self): ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ ++ def test_no_name(self): ++ self.mock_run.run.return_value = ("output", 0) ++ self.assertEqual("output", lib.get_tickets_status(self.mock_run)) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "list"] ++ ) ++ ++ def test_with_name(self): ++ self.mock_run.run.return_value = ("output", 0) ++ self.assertEqual( ++ "output", lib.get_tickets_status(self.mock_run, "name") ++ ) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "list", "-c", "name"] ++ ) ++ ++ def test_failure(self): ++ self.mock_run.run.return_value = ("out", 1) ++ assert_raise_library_error( ++ lambda: lib.get_tickets_status(self.mock_run), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_TICKET_STATUS_ERROR, ++ { ++ "reason": "out" ++ } ++ ) ++ ) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "list"] ++ ) ++ ++ ++class GetPeersStatusTest(TestCase): ++ def setUp(self): ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ ++ def test_no_name(self): ++ self.mock_run.run.return_value = ("output", 0) ++ self.assertEqual("output", lib.get_peers_status(self.mock_run)) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "peers"] ++ ) ++ ++ def test_with_name(self): ++ self.mock_run.run.return_value = ("output", 0) ++ self.assertEqual("output", lib.get_peers_status(self.mock_run, "name")) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "peers", "-c", "name"] ++ ) ++ ++ def test_failure(self): ++ self.mock_run.run.return_value = ("out", 1) ++ 
assert_raise_library_error( ++ lambda: lib.get_peers_status(self.mock_run), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_PEERS_STATUS_ERROR, ++ { ++ "reason": "out" ++ } ++ ) ++ ) ++ self.mock_run.run.assert_called_once_with( ++ [settings.booth_binary, "peers"] ++ ) +diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py +new file mode 100644 +index 0000000..58500cc +--- /dev/null ++++ b/pcs/lib/booth/test/test_sync.py +@@ -0,0 +1,1215 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++import json ++import base64 ++try: ++ # python 2 ++ from urlparse import parse_qs as url_decode ++except ImportError: ++ # python 3 ++ from urllib.parse import parse_qs as url_decode ++ ++from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.assertions import ( ++ assert_report_item_list_equal, ++ assert_raise_library_error, ++) ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor ++ ++from pcs.common import report_codes ++from pcs.lib.node import NodeAddresses, NodeAddressesList ++from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities ++from pcs.lib.external import NodeCommunicator, NodeConnectionException ++import pcs.lib.booth.sync as lib ++ ++ ++def to_b64(string): ++ return base64.b64encode(string.encode("utf-8")).decode("utf-8") ++ ++ ++class SetConfigOnNodeTest(TestCase): ++ def setUp(self): ++ self.mock_com = mock.MagicMock(spec_set=NodeCommunicator) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.node = NodeAddresses("node") ++ ++ def test_with_authfile(self): ++ lib._set_config_on_node( ++ self.mock_com, ++ self.mock_rep, ++ self.node, ++ "cfg_name", ++ "cfg", ++ authfile="/abs/path/my-key.key", ++ authfile_data="test key".encode("utf-8") ++ ) ++ self.assertEqual(1, self.mock_com.call_node.call_count) ++ self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0]) ++ self.assertEqual( ++ 
"remote/booth_set_config", self.mock_com.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_com.call_node.call_args[0][2]) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ { ++ "config": { ++ "name": "cfg_name.conf", ++ "data": "cfg" ++ }, ++ "authfile": { ++ "name": "my-key.key", ++ "data": to_b64("test key") ++ } ++ }, ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": self.node.label, ++ "name": "cfg_name", ++ "name_list": ["cfg_name"] ++ } ++ )] ++ ) ++ ++ def _assert(self): ++ self.assertEqual(1, self.mock_com.call_node.call_count) ++ self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0]) ++ self.assertEqual( ++ "remote/booth_set_config", self.mock_com.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_com.call_node.call_args[0][2]) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ { ++ "config": { ++ "name": "cfg_name.conf", ++ "data": "cfg" ++ } ++ }, ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": self.node.label, ++ "name": "cfg_name", ++ "name_list": ["cfg_name"] ++ } ++ )] ++ ) ++ ++ def test_authfile_data_None(self): ++ lib._set_config_on_node( ++ self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg", ++ authfile="key.key" ++ ) ++ self._assert() ++ ++ def test_authfile_only_data(self): ++ lib._set_config_on_node( ++ self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg", ++ authfile_data="key".encode("utf-8") ++ ) ++ self._assert() ++ ++ def test_without_authfile(self): ++ lib._set_config_on_node( ++ self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg" ++ ) ++ self._assert() ++ ++ ++@mock.patch("pcs.lib.booth.sync.parallel_nodes_communication_helper") ++class 
SyncConfigInCluster(TestCase): ++ def setUp(self): ++ self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.node_list = NodeAddressesList( ++ [NodeAddresses("node" + str(i) for i in range(5))] ++ ) ++ ++ def test_without_authfile(self, mock_parallel): ++ lib.send_config_to_all_nodes( ++ self.mock_communicator, ++ self.mock_reporter, ++ self.node_list, ++ "cfg_name", ++ "config data" ++ ) ++ mock_parallel.assert_called_once_with( ++ lib._set_config_on_node, ++ [ ++ ( ++ [ ++ self.mock_communicator, ++ self.mock_reporter, ++ node, ++ "cfg_name", ++ "config data", ++ None, ++ None ++ ], ++ {} ++ ) ++ for node in self.node_list ++ ], ++ self.mock_reporter, ++ False ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.BOOTH_DISTRIBUTING_CONFIG, ++ {"name": "cfg_name"} ++ )] ++ ) ++ ++ def test_skip_offline(self, mock_parallel): ++ lib.send_config_to_all_nodes( ++ self.mock_communicator, ++ self.mock_reporter, ++ self.node_list, ++ "cfg_name", ++ "config data", ++ skip_offline=True ++ ) ++ mock_parallel.assert_called_once_with( ++ lib._set_config_on_node, ++ [ ++ ( ++ [ ++ self.mock_communicator, ++ self.mock_reporter, ++ node, ++ "cfg_name", ++ "config data", ++ None, ++ None ++ ], ++ {} ++ ) ++ for node in self.node_list ++ ], ++ self.mock_reporter, ++ True ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.BOOTH_DISTRIBUTING_CONFIG, ++ {"name": "cfg_name"} ++ )] ++ ) ++ ++ def test_with_authfile(self, mock_parallel): ++ lib.send_config_to_all_nodes( ++ self.mock_communicator, ++ self.mock_reporter, ++ self.node_list, ++ "cfg_name", ++ "config data", ++ authfile="/my/auth/file.key", ++ authfile_data="authfile data".encode("utf-8") ++ ) ++ mock_parallel.assert_called_once_with( ++ lib._set_config_on_node, ++ [ ++ ( ++ [ ++ self.mock_communicator, ++ 
self.mock_reporter, ++ node, ++ "cfg_name", ++ "config data", ++ "/my/auth/file.key", ++ "authfile data".encode("utf-8") ++ ], ++ {} ++ ) ++ for node in self.node_list ++ ], ++ self.mock_reporter, ++ False ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.BOOTH_DISTRIBUTING_CONFIG, ++ {"name": "cfg_name"} ++ )] ++ ) ++ ++ ++@mock.patch("pcs.lib.booth.config_structure.get_authfile") ++@mock.patch("pcs.lib.booth.config_parser.parse") ++@mock.patch("pcs.lib.booth.config_files.read_configs") ++@mock.patch("pcs.lib.booth.config_files.read_authfile") ++class SendAllConfigToNodeTest(TestCase): ++ def setUp(self): ++ self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.node = NodeAddresses("node") ++ ++ @staticmethod ++ def mock_parse_fn(config_content): ++ if config_content not in ["config1", "config2"]: ++ raise AssertionError( ++ "unexpected input {0}".format(config_content) ++ ) ++ return config_content ++ ++ @staticmethod ++ def mock_authfile_fn(parsed_config): ++ _data = { ++ "config1": "/path/to/file1.key", ++ "config2": "/path/to/file2.key" ++ } ++ if parsed_config not in _data: ++ raise AssertionError( ++ "unexpected input {0}".format(parsed_config) ++ ) ++ return _data[parsed_config] ++ ++ @staticmethod ++ def mock_read_authfile_fn(_, authfile_path): ++ _data = { ++ "/path/to/file1.key": "some key".encode("utf-8"), ++ "/path/to/file2.key": "another key".encode("utf-8"), ++ } ++ if authfile_path not in _data: ++ raise AssertionError( ++ "unexpected input {0}".format(authfile_path) ++ ) ++ return _data[authfile_path] ++ ++ def test_success( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ mock_parse.side_effect = self.mock_parse_fn ++ mock_authfile.side_effect = self.mock_authfile_fn ++ mock_read_authfile.side_effect = self.mock_read_authfile_fn ++ mock_read_configs.return_value = { ++ 
"name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = """ ++ { ++ "existing": [], ++ "failed": {}, ++ "saved": ["name1.conf", "file1.key", "name2.conf", "file2.key"] ++ } ++ """ ++ lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_read_authfile.call_count) ++ mock_read_authfile.assert_has_calls([ ++ mock.call(self.mock_reporter, "/path/to/file1.key"), ++ mock.call(self.mock_reporter, "/path/to/file2.key") ++ ]) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "file1.key", ++ "data": to_b64("some key"), ++ "is_authfile": True ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, ++ {"node": self.node.label} ++ ), ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": 
self.node.label, ++ "name": "name1.conf, file1.key, name2.conf, file2.key", ++ "name_list": [ ++ "name1.conf", "file1.key", "name2.conf", "file2.key" ++ ] ++ } ++ ) ++ ] ++ ) ++ ++ def test_do_not_rewrite_existing( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ mock_parse.side_effect = self.mock_parse_fn ++ mock_authfile.side_effect = self.mock_authfile_fn ++ mock_read_authfile.side_effect = self.mock_read_authfile_fn ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = """ ++ { ++ "existing": ["name1.conf", "file1.key"], ++ "failed": {}, ++ "saved": ["name2.conf", "file2.key"] ++ } ++ """ ++ assert_raise_library_error( ++ lambda: lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_role": None, ++ "file_path": "name1.conf", ++ "node": self.node.label ++ }, ++ report_codes.FORCE_FILE_OVERWRITE ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_role": None, ++ "file_path": "file1.key", ++ "node": self.node.label ++ }, ++ report_codes.FORCE_FILE_OVERWRITE ++ ) ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_read_authfile.call_count) ++ mock_read_authfile.assert_has_calls([ ++ mock.call(self.mock_reporter, "/path/to/file1.key"), ++ mock.call(self.mock_reporter, "/path/to/file2.key") ++ ]) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ 
"remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "file1.key", ++ "data": to_b64("some key"), ++ "is_authfile": True ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, ++ {"node": self.node.label} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_role": None, ++ "file_path": "name1.conf", ++ "node": self.node.label ++ }, ++ report_codes.FORCE_FILE_OVERWRITE ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_role": None, ++ "file_path": "file1.key", ++ "node": self.node.label ++ }, ++ report_codes.FORCE_FILE_OVERWRITE ++ ) ++ ] ++ ) ++ ++ def test_rewrite_existing( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ mock_parse.side_effect = self.mock_parse_fn ++ mock_authfile.side_effect = self.mock_authfile_fn ++ mock_read_authfile.side_effect = self.mock_read_authfile_fn ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = """ ++ { ++ "existing": ["name1.conf", "file1.key"], ++ "failed": {}, ++ "saved": ["name2.conf", "file2.key"] ++ } ++ """ ++ lib.send_all_config_to_node( ++ self.mock_communicator, ++ self.mock_reporter, ++ self.node, ++ rewrite_existing=True ++ ) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ 
self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_read_authfile.call_count) ++ mock_read_authfile.assert_has_calls([ ++ mock.call(self.mock_reporter, "/path/to/file1.key"), ++ mock.call(self.mock_reporter, "/path/to/file2.key") ++ ]) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertTrue("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "file1.key", ++ "data": to_b64("some key"), ++ "is_authfile": True ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, ++ {"node": self.node.label} ++ ), ++ ( ++ Severities.WARNING, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_role": None, ++ "file_path": "name1.conf", ++ "node": self.node.label ++ } ++ ), ++ ( ++ Severities.WARNING, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_role": None, ++ "file_path": "file1.key", ++ "node": self.node.label ++ } ++ ), ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": self.node.label, ++ "name": "name2.conf, file2.key", ++ "name_list": ["name2.conf", 
"file2.key"] ++ } ++ ) ++ ] ++ ) ++ ++ def test_write_failure( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ mock_parse.side_effect = self.mock_parse_fn ++ mock_authfile.side_effect = self.mock_authfile_fn ++ mock_read_authfile.side_effect = self.mock_read_authfile_fn ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = """ ++ { ++ "existing": [], ++ "failed": { ++ "name1.conf": "Error message", ++ "file1.key": "Another error message" ++ }, ++ "saved": ["name2.conf", "file2.key"] ++ } ++ """ ++ assert_raise_library_error( ++ lambda: lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ { ++ "node": self.node.label, ++ "name": "name1.conf", ++ "reason": "Error message" ++ } ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ { ++ "node": self.node.label, ++ "name": "file1.key", ++ "reason": "Another error message" ++ } ++ ) ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_read_authfile.call_count) ++ mock_read_authfile.assert_has_calls([ ++ mock.call(self.mock_reporter, "/path/to/file1.key"), ++ mock.call(self.mock_reporter, "/path/to/file2.key") ++ ]) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ 
self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "file1.key", ++ "data": to_b64("some key"), ++ "is_authfile": True ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, ++ {"node": self.node.label} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ { ++ "node": self.node.label, ++ "name": "name1.conf", ++ "reason": "Error message" ++ } ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ { ++ "node": self.node.label, ++ "name": "file1.key", ++ "reason": "Another error message" ++ } ++ ) ++ ] ++ ) ++ ++ def test_communication_failure( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ mock_parse.side_effect = self.mock_parse_fn ++ mock_authfile.side_effect = self.mock_authfile_fn ++ mock_read_authfile.side_effect = self.mock_read_authfile_fn ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.side_effect = NodeConnectionException( ++ self.node.label, "command", "reason" ++ ) ++ assert_raise_library_error( ++ lambda: lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ { ++ "node": self.node.label, ++ "command": "command", ++ "reason": "reason" ++ } ++ ) ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ 
self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_read_authfile.call_count) ++ mock_read_authfile.assert_has_calls([ ++ mock.call(self.mock_reporter, "/path/to/file1.key"), ++ mock.call(self.mock_reporter, "/path/to/file2.key") ++ ]) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "file1.key", ++ "data": to_b64("some key"), ++ "is_authfile": True ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ ++ def test_wrong_response_format( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ mock_parse.side_effect = self.mock_parse_fn ++ mock_authfile.side_effect = self.mock_authfile_fn ++ mock_read_authfile.side_effect = self.mock_read_authfile_fn ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = """ ++ { ++ "existing_files": [], ++ "failed": { ++ "name1.conf": "Error message", ++ "file1.key": "Another error message" ++ }, ++ "saved": ["name2.conf", "file2.key"] ++ } ++ """ ++ assert_raise_library_error( ++ lambda: lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node 
++ ), ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_RESPONSE_FORMAT, ++ {"node": self.node.label} ++ ) ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_read_authfile.call_count) ++ mock_read_authfile.assert_has_calls([ ++ mock.call(self.mock_reporter, "/path/to/file1.key"), ++ mock.call(self.mock_reporter, "/path/to/file2.key") ++ ]) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "file1.key", ++ "data": to_b64("some key"), ++ "is_authfile": True ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ ++ def test_response_not_json( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ mock_parse.side_effect = self.mock_parse_fn ++ mock_authfile.side_effect = self.mock_authfile_fn ++ mock_read_authfile.side_effect = self.mock_read_authfile_fn ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = "not json" ++ assert_raise_library_error( ++ lambda: 
lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_RESPONSE_FORMAT, ++ {"node": self.node.label} ++ ) ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_read_authfile.call_count) ++ mock_read_authfile.assert_has_calls([ ++ mock.call(self.mock_reporter, "/path/to/file1.key"), ++ mock.call(self.mock_reporter, "/path/to/file2.key") ++ ]) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "file1.key", ++ "data": to_b64("some key"), ++ "is_authfile": True ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ ++ ++ def test_configs_without_authfiles( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ def mock_authfile_fn(parsed_config): ++ if parsed_config == "config1": ++ return None ++ elif parsed_config == "config2": ++ return "/path/to/file2.key" ++ else: ++ raise AssertionError( ++ "unexpected input: {0}".format(parsed_config) ++ ) ++ ++ mock_parse.side_effect = 
self.mock_parse_fn ++ mock_authfile.side_effect = mock_authfile_fn ++ mock_read_authfile.return_value = "another key".encode("utf-8") ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = """ ++ { ++ "existing": [], ++ "failed": {}, ++ "saved": ["name1.conf", "name2.conf", "file2.key"] ++ } ++ """ ++ lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ self.assertEqual(2, mock_authfile.call_count) ++ mock_authfile.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ mock_read_authfile.assert_called_once_with( ++ self.mock_reporter, "/path/to/file2.key" ++ ) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name1.conf", ++ "data": "config1", ++ "is_authfile": False ++ }, ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, ++ {"node": self.node.label} ++ ), ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": self.node.label, ++ "name": "name1.conf, name2.conf, 
file2.key", ++ "name_list": ["name1.conf", "name2.conf", "file2.key"] ++ } ++ ) ++ ] ++ ) ++ ++ def test_unable_to_parse_config( ++ self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile ++ ): ++ def mock_parse_fn(config_data): ++ if config_data == "config1": ++ raise LibraryError() ++ elif config_data == "config2": ++ return "config2" ++ else: ++ raise AssertionError( ++ "unexpected input: {0}".format(config_data) ++ ) ++ ++ mock_parse.side_effect = mock_parse_fn ++ mock_authfile.return_value = "/path/to/file2.key" ++ mock_read_authfile.return_value = "another key".encode("utf-8") ++ mock_read_configs.return_value = { ++ "name1.conf": "config1", ++ "name2.conf": "config2" ++ } ++ self.mock_communicator.call_node.return_value = """ ++ { ++ "existing": [], ++ "failed": {}, ++ "saved": ["name2.conf", "file2.key"] ++ } ++ """ ++ lib.send_all_config_to_node( ++ self.mock_communicator, self.mock_reporter, self.node ++ ) ++ self.assertEqual(2, mock_parse.call_count) ++ mock_parse.assert_has_calls([ ++ mock.call("config1"), mock.call("config2") ++ ]) ++ mock_authfile.assert_called_once_with("config2") ++ mock_read_authfile.assert_called_once_with( ++ self.mock_reporter, "/path/to/file2.key" ++ ) ++ mock_read_configs.assert_called_once_with(self.mock_reporter, False) ++ self.assertEqual(1, self.mock_communicator.call_node.call_count) ++ self.assertEqual( ++ self.node, self.mock_communicator.call_node.call_args[0][0] ++ ) ++ self.assertEqual( ++ "remote/booth_save_files", ++ self.mock_communicator.call_node.call_args[0][1] ++ ) ++ data = url_decode(self.mock_communicator.call_node.call_args[0][2]) ++ self.assertFalse("rewrite_existing" in data) ++ self.assertTrue("data_json" in data) ++ self.assertEqual( ++ [ ++ { ++ "name": "name2.conf", ++ "data": "config2", ++ "is_authfile": False ++ }, ++ { ++ "name": "file2.key", ++ "data": to_b64("another key"), ++ "is_authfile": True ++ } ++ ], ++ json.loads(data["data_json"][0]) ++ ) ++ assert_report_item_list_equal( 
++ self.mock_reporter.report_item_list, ++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, ++ {"node": self.node.label} ++ ), ++ ( ++ Severities.WARNING, ++ report_codes.BOOTH_SKIPPING_CONFIG, ++ { ++ "config_file": "name1.conf" ++ } ++ ), ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": self.node.label, ++ "name": "name2.conf, file2.key", ++ "name_list": ["name2.conf", "file2.key"] ++ } ++ ) ++ ] ++ ) ++ ++ ++class PullConfigFromNodeTest(TestCase): ++ def setUp(self): ++ self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator) ++ self.node = NodeAddresses("node") ++ ++ def test_success(self): ++ self.mock_communicator.call_node.return_value = "{}" ++ self.assertEqual( ++ {}, lib.pull_config_from_node( ++ self.mock_communicator, self.node, "booth" ++ ) ++ ) ++ self.mock_communicator.call_node.assert_called_once_with( ++ self.node, "remote/booth_get_config", "name=booth" ++ ) ++ ++ def test_not_json(self): ++ self.mock_communicator.call_node.return_value = "not json" ++ assert_raise_library_error( ++ lambda: lib.pull_config_from_node( ++ self.mock_communicator, self.node, "booth" ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_RESPONSE_FORMAT, ++ {"node": self.node.label} ++ ) ++ ) ++ ++ def test_communication_failure(self): ++ self.mock_communicator.call_node.side_effect = NodeConnectionException( ++ self.node.label, "command", "reason" ++ ) ++ assert_raise_library_error( ++ lambda: lib.pull_config_from_node( ++ self.mock_communicator, self.node, "booth" ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT, ++ { ++ "node": self.node.label, ++ "command": "command", ++ "reason": "reason" ++ } ++ ) ++ ) +diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py +index f86b63b..d8ce57a 100644 +--- a/pcs/lib/cib/tools.py ++++ b/pcs/lib/cib/tools.py +@@ -100,6 +100,13 @@ def get_constraints(tree): + """ + return _get_mandatory_section(tree, 
"configuration/constraints") + ++def get_resources(tree): ++ """ ++ Return 'resources' element from tree ++ tree cib etree node ++ """ ++ return _get_mandatory_section(tree, "configuration/resources") ++ + def find_parent(element, tag_names): + candidate = element + while True: +diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py +new file mode 100644 +index 0000000..43ea9dd +--- /dev/null ++++ b/pcs/lib/commands/booth.py +@@ -0,0 +1,349 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import base64 ++import os.path ++from functools import partial ++ ++from pcs import settings ++from pcs.lib import external, reports ++from pcs.lib.booth import ( ++ config_exchange, ++ config_files, ++ config_structure, ++ reports as booth_reports, ++ resource, ++ status, ++ sync, ++) ++from pcs.lib.booth.config_parser import parse, build ++from pcs.lib.booth.env import get_config_file_name ++from pcs.lib.cib.tools import get_resources ++from pcs.lib.errors import LibraryError, ReportItemSeverity ++from pcs.lib.node import NodeAddresses ++ ++ ++def config_setup(env, booth_configuration, overwrite_existing=False): ++ """ ++ create boot configuration ++ list site_list contains site adresses of multisite ++ list arbitrator_list contains arbitrator adresses of multisite ++ """ ++ ++ config_structure.validate_peers( ++ booth_configuration.get("sites", []), ++ booth_configuration.get("arbitrators", []) ++ ) ++ config_content = config_exchange.from_exchange_format(booth_configuration) ++ ++ env.booth.create_key(config_files.generate_key(), overwrite_existing) ++ config_content = config_structure.set_authfile( ++ config_content, ++ env.booth.key_path ++ ) ++ env.booth.create_config(build(config_content), overwrite_existing) ++ ++def config_destroy(env, ignore_config_load_problems=False): ++ env.booth.command_expect_live_env() ++ env.command_expect_live_corosync_env() ++ ++ name = env.booth.name ++ config_is_used = 
partial(booth_reports.booth_config_is_used, name) ++ ++ report_list = [] ++ ++ if(env.is_node_in_cluster() and resource.find_for_config( ++ get_resources(env.get_cib()), ++ get_config_file_name(name), ++ )): ++ report_list.append(config_is_used("in cluster resource")) ++ ++ #Only systemd is currently supported. Initd does not supports multiple ++ #instances (here specified by name) ++ if external.is_systemctl(): ++ if external.is_service_running(env.cmd_runner(), "booth", name): ++ report_list.append(config_is_used("(running in systemd)")) ++ ++ if external.is_service_enabled(env.cmd_runner(), "booth", name): ++ report_list.append(config_is_used("(enabled in systemd)")) ++ ++ if report_list: ++ raise LibraryError(*report_list) ++ ++ authfile_path = None ++ try: ++ authfile_path = config_structure.get_authfile( ++ parse(env.booth.get_config_content()) ++ ) ++ except LibraryError: ++ if not ignore_config_load_problems: ++ raise LibraryError(booth_reports.booth_cannot_identify_keyfile()) ++ ++ #if content not received, not valid,... 
still remove config needed ++ env.report_processor.process( ++ booth_reports.booth_cannot_identify_keyfile( ++ severity=ReportItemSeverity.WARNING ++ ) ++ ) ++ ++ if( ++ authfile_path ++ and ++ os.path.dirname(authfile_path) == settings.booth_config_dir ++ ): ++ env.booth.set_key_path(authfile_path) ++ env.booth.remove_key() ++ env.booth.remove_config() ++ ++def config_show(env): ++ """ ++ return configuration as tuple of sites list and arbitrators list ++ """ ++ return config_exchange.to_exchange_format( ++ parse(env.booth.get_config_content()) ++ ) ++ ++def config_ticket_add(env, ticket_name): ++ """ ++ add ticket to booth configuration ++ """ ++ booth_configuration = config_structure.add_ticket( ++ parse(env.booth.get_config_content()), ++ ticket_name ++ ) ++ env.booth.push_config(build(booth_configuration)) ++ ++def config_ticket_remove(env, ticket_name): ++ """ ++ remove ticket from booth configuration ++ """ ++ booth_configuration = config_structure.remove_ticket( ++ parse(env.booth.get_config_content()), ++ ticket_name ++ ) ++ env.booth.push_config(build(booth_configuration)) ++ ++def create_in_cluster(env, name, ip, resource_create): ++ #TODO resource_create is provisional hack until resources are not moved to ++ #lib ++ resources_section = get_resources(env.get_cib()) ++ ++ booth_config_file_path = get_config_file_name(name) ++ if resource.find_for_config(resources_section, booth_config_file_path): ++ raise LibraryError(booth_reports.booth_already_in_cib(name)) ++ ++ resource.get_creator(resource_create)( ++ ip, ++ booth_config_file_path, ++ create_id = partial( ++ resource.create_resource_id, ++ resources_section, ++ name ++ ) ++ ) ++ ++def remove_from_cluster(env, name, resource_remove): ++ #TODO resource_remove is provisional hack until resources are not moved to ++ #lib ++ try: ++ num_of_removed_booth_resources = resource.get_remover(resource_remove)( ++ get_resources(env.get_cib()), ++ get_config_file_name(name), ++ ) ++ if 
num_of_removed_booth_resources > 1: ++ env.report_processor.process( ++ booth_reports.booth_multiple_times_in_cib( ++ name, ++ severity=ReportItemSeverity.WARNING, ++ ) ++ ) ++ except resource.BoothNotFoundInCib: ++ raise LibraryError(booth_reports.booth_not_exists_in_cib(name)) ++ except resource.BoothMultipleOccurenceFoundInCib: ++ raise LibraryError(booth_reports.booth_multiple_times_in_cib(name)) ++ ++def ticket_operation(operation, env, name, ticket, site_ip): ++ if not site_ip: ++ site_ip_list = resource.find_bound_ip( ++ get_resources(env.get_cib()), ++ get_config_file_name(name) ++ ) ++ if len(site_ip_list) != 1: ++ raise LibraryError( ++ booth_reports.booth_cannot_determine_local_site_ip() ++ ) ++ site_ip = site_ip_list[0] ++ ++ command_output, return_code = env.cmd_runner().run([ ++ settings.booth_binary, operation, ++ "-s", site_ip, ++ ticket ++ ]) ++ ++ if return_code != 0: ++ raise LibraryError( ++ booth_reports.booth_ticket_operation_failed( ++ operation, ++ command_output, ++ site_ip, ++ ticket ++ ) ++ ) ++ ++ticket_grant = partial(ticket_operation, "grant") ++ticket_revoke = partial(ticket_operation, "revoke") ++ ++def config_sync(env, name, skip_offline_nodes=False): ++ """ ++ Send specified local booth configuration to all nodes in cluster. ++ ++ env -- LibraryEnvironment ++ name -- booth instance name ++ skip_offline_nodes -- if True offline nodes will be skipped ++ """ ++ config = env.booth.get_config_content() ++ authfile_path = config_structure.get_authfile(parse(config)) ++ authfile_content = config_files.read_authfile( ++ env.report_processor, authfile_path ++ ) ++ ++ sync.send_config_to_all_nodes( ++ env.node_communicator(), ++ env.report_processor, ++ env.get_corosync_conf().get_nodes(), ++ name, ++ config, ++ authfile=authfile_path, ++ authfile_data=authfile_content, ++ skip_offline=skip_offline_nodes ++ ) ++ ++ ++def enable_booth(env, name=None): ++ """ ++ Enable specified instance of booth service. 
Currently it is supported only ++ systemd systems. ++ ++ env -- LibraryEnvironment ++ name -- string, name of booth instance ++ """ ++ external.ensure_is_systemd() ++ try: ++ external.enable_service(env.cmd_runner(), "booth", name) ++ except external.EnableServiceError as e: ++ raise LibraryError(reports.service_enable_error( ++ "booth", e.message, instance=name ++ )) ++ env.report_processor.process(reports.service_enable_success( ++ "booth", instance=name ++ )) ++ ++ ++def disable_booth(env, name=None): ++ """ ++ Disable specified instance of booth service. Currently it is supported only ++ systemd systems. ++ ++ env -- LibraryEnvironment ++ name -- string, name of booth instance ++ """ ++ external.ensure_is_systemd() ++ try: ++ external.disable_service(env.cmd_runner(), "booth", name) ++ except external.DisableServiceError as e: ++ raise LibraryError(reports.service_disable_error( ++ "booth", e.message, instance=name ++ )) ++ env.report_processor.process(reports.service_disable_success( ++ "booth", instance=name ++ )) ++ ++ ++def start_booth(env, name=None): ++ """ ++ Start specified instance of booth service. Currently it is supported only ++ systemd systems. On non systems it can be run like this: ++ BOOTH_CONF_FILE=<booth-file-path> /etc/initd/booth-arbitrator ++ ++ env -- LibraryEnvironment ++ name -- string, name of booth instance ++ """ ++ external.ensure_is_systemd() ++ try: ++ external.start_service(env.cmd_runner(), "booth", name) ++ except external.StartServiceError as e: ++ raise LibraryError(reports.service_start_error( ++ "booth", e.message, instance=name ++ )) ++ env.report_processor.process(reports.service_start_success( ++ "booth", instance=name ++ )) ++ ++ ++def stop_booth(env, name=None): ++ """ ++ Stop specified instance of booth service. Currently it is supported only ++ systemd systems. 
++ ++ env -- LibraryEnvironment ++ name -- string, name of booth instance ++ """ ++ external.ensure_is_systemd() ++ try: ++ external.stop_service(env.cmd_runner(), "booth", name) ++ except external.StopServiceError as e: ++ raise LibraryError(reports.service_stop_error( ++ "booth", e.message, instance=name ++ )) ++ env.report_processor.process(reports.service_stop_success( ++ "booth", instance=name ++ )) ++ ++ ++def pull_config(env, node_name, name): ++ """ ++ Get config from specified node and save it on local system. It will ++ rewrite existing files. ++ ++ env -- LibraryEnvironment ++ node_name -- string, name of node from which config should be fetched ++ name -- string, name of booth instance of which config should be fetched ++ """ ++ env.report_processor.process( ++ booth_reports.booth_fetching_config_from_node(node_name, name) ++ ) ++ output = sync.pull_config_from_node( ++ env.node_communicator(), NodeAddresses(node_name), name ++ ) ++ try: ++ env.booth.create_config(output["config"]["data"], True) ++ if ( ++ output["authfile"]["name"] is not None and ++ output["authfile"]["data"] ++ ): ++ env.booth.set_key_path(os.path.join( ++ settings.booth_config_dir, output["authfile"]["name"] ++ )) ++ env.booth.create_key( ++ base64.b64decode( ++ output["authfile"]["data"].encode("utf-8") ++ ), ++ True ++ ) ++ env.report_processor.process( ++ booth_reports.booth_config_saved(name_list=[name]) ++ ) ++ except KeyError: ++ raise LibraryError(reports.invalid_response_format(node_name)) ++ ++ ++def get_status(env, name=None): ++ return { ++ "status": status.get_daemon_status(env.cmd_runner(), name), ++ "ticket": status.get_tickets_status(env.cmd_runner(), name), ++ "peers": status.get_peers_status(env.cmd_runner(), name), ++ } +diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py +new file mode 100644 +index 0000000..20bf06a +--- /dev/null ++++ b/pcs/lib/commands/test/test_booth.py +@@ -0,0 +1,614 @@ ++from __future__ import ( ++ 
absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os ++import base64 ++ ++from unittest import TestCase ++ ++from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor ++from pcs.test.tools.assertions import ( ++ assert_raise_library_error, ++ assert_report_item_list_equal, ++) ++ ++from pcs import settings ++from pcs.common import report_codes ++from pcs.lib.booth import resource as booth_resource ++from pcs.lib.env import LibraryEnvironment ++from pcs.lib.node import NodeAddresses ++from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities ++from pcs.lib.commands import booth as commands ++from pcs.lib.external import ( ++ NodeCommunicator, ++ CommandRunner, ++ EnableServiceError, ++ DisableServiceError, ++ StartServiceError, ++ StopServiceError ++) ++ ++def patch_commands(target, *args, **kwargs): ++ return mock.patch( ++ "pcs.lib.commands.booth.{0}".format(target), *args, **kwargs ++ ) ++ ++@mock.patch("pcs.lib.booth.config_files.generate_key", return_value="key value") ++@mock.patch("pcs.lib.commands.booth.build", return_value="config content") ++@mock.patch("pcs.lib.booth.config_structure.validate_peers") ++class ConfigSetupTest(TestCase): ++ def test_successfuly_build_and_write_to_std_path( ++ self, mock_validate_peers, mock_build, mock_generate_key ++ ): ++ env = mock.MagicMock() ++ commands.config_setup( ++ env, ++ booth_configuration={ ++ "sites": ["1.1.1.1"], ++ "arbitrators": ["2.2.2.2"], ++ }, ++ ) ++ env.booth.create_config.assert_called_once_with( ++ "config content", ++ False ++ ) ++ env.booth.create_key.assert_called_once_with( ++ "key value", ++ False ++ ) ++ mock_validate_peers.assert_called_once_with( ++ ["1.1.1.1"], ["2.2.2.2"] ++ ) ++ ++ def test_sanitize_peers_before_validation( ++ self, mock_validate_peers, mock_build, mock_generate_key ++ ): ++ commands.config_setup(env=mock.MagicMock(), booth_configuration={}) ++ 
mock_validate_peers.assert_called_once_with([], []) ++ ++ ++class ConfigDestroyTest(TestCase): ++ @patch_commands("external.is_systemctl", mock.Mock(return_value=True)) ++ @patch_commands("external.is_service_enabled", mock.Mock(return_value=True)) ++ @patch_commands("external.is_service_running", mock.Mock(return_value=True)) ++ @patch_commands("resource.find_for_config", mock.Mock(return_value=[True])) ++ def test_raises_when_booth_config_in_use(self): ++ env = mock.MagicMock() ++ env.booth.name = "somename" ++ ++ assert_raise_library_error( ++ lambda: commands.config_destroy(env), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CONFIG_IS_USED, ++ { ++ "name": "somename", ++ "detail": "in cluster resource", ++ } ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CONFIG_IS_USED, ++ { ++ "name": "somename", ++ "detail": "(enabled in systemd)", ++ } ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CONFIG_IS_USED, ++ { ++ "name": "somename", ++ "detail": "(running in systemd)", ++ } ++ ) ++ ) ++ ++ @patch_commands("external.is_systemctl", mock.Mock(return_value=False)) ++ @patch_commands("resource.find_for_config", mock.Mock(return_value=[])) ++ @patch_commands("parse", mock.Mock(side_effect=LibraryError())) ++ def test_raises_when_cannot_get_content_of_config(self): ++ env = mock.MagicMock() ++ assert_raise_library_error( ++ lambda: commands.config_destroy(env), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE, ++ {}, ++ report_codes.FORCE_BOOTH_DESTROY ++ ) ++ ) ++ ++ @patch_commands("external.is_systemctl", mock.Mock(return_value=False)) ++ @patch_commands("resource.find_for_config", mock.Mock(return_value=[])) ++ @patch_commands("parse", mock.Mock(side_effect=LibraryError())) ++ def test_remove_config_even_if_cannot_get_its_content_when_forced(self): ++ env = mock.MagicMock() ++ env.report_processor = MockLibraryReportProcessor() ++ commands.config_destroy(env, ignore_config_load_problems=True) ++ 
env.booth.remove_config.assert_called_once_with() ++ assert_report_item_list_equal(env.report_processor.report_item_list, [ ++ ( ++ Severities.WARNING, ++ report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE, ++ {} ++ ) ++ ]) ++ ++@mock.patch("pcs.lib.commands.booth.config_structure.get_authfile") ++@mock.patch("pcs.lib.commands.booth.parse") ++@mock.patch("pcs.lib.booth.config_files.read_authfile") ++@mock.patch("pcs.lib.booth.sync.send_config_to_all_nodes") ++class ConfigSyncTest(TestCase): ++ def setUp(self): ++ self.mock_env = mock.MagicMock() ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_env.report_processor = self.mock_rep ++ self.mock_com = mock.MagicMock(spec_set=NodeCommunicator) ++ self.mock_env.node_communicator.return_value = self.mock_com ++ self.node_list = ["node1", "node2", "node3"] ++ corosync_conf = mock.MagicMock() ++ corosync_conf.get_nodes.return_value = self.node_list ++ self.mock_env.get_corosync_conf.return_value = corosync_conf ++ self.mock_env.booth.get_config_content.return_value = "config" ++ ++ def test_skip_offline( ++ self, mock_sync, mock_read_key, mock_parse, mock_get_authfile ++ ): ++ mock_get_authfile.return_value = "/key/path.key" ++ mock_read_key.return_value = "key" ++ commands.config_sync(self.mock_env, "name", True) ++ self.mock_env.booth.get_config_content.assert_called_once_with() ++ mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key") ++ mock_parse.assert_called_once_with("config") ++ mock_sync.assert_called_once_with( ++ self.mock_com, ++ self.mock_rep, ++ self.node_list, ++ "name", ++ "config", ++ authfile="/key/path.key", ++ authfile_data="key", ++ skip_offline=True ++ ) ++ ++ def test_do_not_skip_offline( ++ self, mock_sync, mock_read_key, mock_parse, mock_get_authfile ++ ): ++ mock_get_authfile.return_value = "/key/path.key" ++ mock_read_key.return_value = "key" ++ commands.config_sync(self.mock_env, "name") ++ self.mock_env.booth.get_config_content.assert_called_once_with() ++ 
mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key") ++ mock_parse.assert_called_once_with("config") ++ mock_sync.assert_called_once_with( ++ self.mock_com, ++ self.mock_rep, ++ self.node_list, ++ "name", ++ "config", ++ authfile="/key/path.key", ++ authfile_data="key", ++ skip_offline=False ++ ) ++ ++ ++@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd") ++@mock.patch("pcs.lib.external.enable_service") ++class EnableBoothTest(TestCase): ++ def setUp(self): ++ self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_env.cmd_runner.return_value = self.mock_run ++ self.mock_env.report_processor = self.mock_rep ++ ++ def test_success(self, mock_enable, mock_is_systemctl): ++ commands.enable_booth(self.mock_env, "name") ++ mock_enable.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.SERVICE_ENABLE_SUCCESS, ++ { ++ "service": "booth", ++ "node": None, ++ "instance": "name", ++ } ++ )] ++ ) ++ ++ def test_failed(self, mock_enable, mock_is_systemctl): ++ mock_enable.side_effect = EnableServiceError("booth", "msg", "name") ++ assert_raise_library_error( ++ lambda: commands.enable_booth(self.mock_env, "name"), ++ ( ++ Severities.ERROR, ++ report_codes.SERVICE_ENABLE_ERROR, ++ { ++ "service": "booth", ++ "reason": "msg", ++ "node": None, ++ "instance": "name", ++ } ++ ) ++ ) ++ mock_enable.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ ++ ++@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd") ++@mock.patch("pcs.lib.external.disable_service") ++class DisableBoothTest(TestCase): ++ def setUp(self): ++ self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment) ++ self.mock_rep = 
MockLibraryReportProcessor() ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_env.cmd_runner.return_value = self.mock_run ++ self.mock_env.report_processor = self.mock_rep ++ ++ def test_success(self, mock_disable, mock_is_systemctl): ++ commands.disable_booth(self.mock_env, "name") ++ mock_disable.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.SERVICE_DISABLE_SUCCESS, ++ { ++ "service": "booth", ++ "node": None, ++ "instance": "name", ++ } ++ )] ++ ) ++ ++ def test_failed(self, mock_disable, mock_is_systemctl): ++ mock_disable.side_effect = DisableServiceError("booth", "msg", "name") ++ assert_raise_library_error( ++ lambda: commands.disable_booth(self.mock_env, "name"), ++ ( ++ Severities.ERROR, ++ report_codes.SERVICE_DISABLE_ERROR, ++ { ++ "service": "booth", ++ "reason": "msg", ++ "node": None, ++ "instance": "name", ++ } ++ ) ++ ) ++ mock_disable.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ ++ ++@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd") ++@mock.patch("pcs.lib.external.start_service") ++class StartBoothTest(TestCase): ++ def setUp(self): ++ self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_env.cmd_runner.return_value = self.mock_run ++ self.mock_env.report_processor = self.mock_rep ++ ++ def test_success(self, mock_start, mock_is_systemctl): ++ commands.start_booth(self.mock_env, "name") ++ mock_start.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.SERVICE_START_SUCCESS, ++ { ++ "service": "booth", ++ 
"node": None, ++ "instance": "name", ++ } ++ )] ++ ) ++ ++ def test_failed(self, mock_start, mock_is_systemctl): ++ mock_start.side_effect = StartServiceError("booth", "msg", "name") ++ assert_raise_library_error( ++ lambda: commands.start_booth(self.mock_env, "name"), ++ ( ++ Severities.ERROR, ++ report_codes.SERVICE_START_ERROR, ++ { ++ "service": "booth", ++ "reason": "msg", ++ "node": None, ++ "instance": "name", ++ } ++ ) ++ ) ++ mock_start.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ ++ ++@mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd") ++@mock.patch("pcs.lib.external.stop_service") ++class StopBoothTest(TestCase): ++ def setUp(self): ++ self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_env.cmd_runner.return_value = self.mock_run ++ self.mock_env.report_processor = self.mock_rep ++ ++ def test_success(self, mock_stop, mock_is_systemctl): ++ commands.stop_booth(self.mock_env, "name") ++ mock_stop.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.SERVICE_STOP_SUCCESS, ++ { ++ "service": "booth", ++ "node": None, ++ "instance": "name", ++ } ++ )] ++ ) ++ ++ def test_failed(self, mock_stop, mock_is_systemctl): ++ mock_stop.side_effect = StopServiceError("booth", "msg", "name") ++ assert_raise_library_error( ++ lambda: commands.stop_booth(self.mock_env, "name"), ++ ( ++ Severities.ERROR, ++ report_codes.SERVICE_STOP_ERROR, ++ { ++ "service": "booth", ++ "reason": "msg", ++ "node": None, ++ "instance": "name", ++ } ++ ) ++ ) ++ mock_stop.assert_called_once_with(self.mock_run, "booth", "name") ++ mock_is_systemctl.assert_called_once_with() ++ ++ ++@mock.patch("pcs.lib.booth.sync.pull_config_from_node") 
++class PullConfigTest(TestCase): ++ def setUp(self): ++ self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_com = mock.MagicMock(spec_set=NodeCommunicator) ++ self.mock_env.node_communicator.return_value = self.mock_com ++ self.mock_env.report_processor = self.mock_rep ++ ++ def test_with_authfile(self, mock_pull): ++ mock_pull.return_value = { ++ "config": { ++ "name": "name.conf", ++ "data": "config" ++ }, ++ "authfile": { ++ "name": "name.key", ++ "data": base64.b64encode("key".encode("utf-8")).decode("utf-8") ++ } ++ } ++ commands.pull_config(self.mock_env, "node", "name") ++ mock_pull.assert_called_once_with( ++ self.mock_com, NodeAddresses("node"), "name" ++ ) ++ self.mock_env.booth.create_config.called_once_with("config", True) ++ self.mock_env.booth.set_key_path.called_once_with(os.path.join( ++ settings.booth_config_dir, "name.key" ++ )) ++ self.mock_env.booth.create_key.called_once_with( ++ "key".encode("utf-8"), True ++ ) ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE, ++ { ++ "node": "node", ++ "config": "name" ++ } ++ ), ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": None, ++ "name": "name", ++ "name_list": ["name"] ++ } ++ ) ++ ] ++ ) ++ ++ def test_without_authfile(self, mock_pull): ++ mock_pull.return_value = { ++ "config": { ++ "name": "name.conf", ++ "data": "config" ++ }, ++ "authfile": { ++ "name": None, ++ "data": None ++ } ++ } ++ commands.pull_config(self.mock_env, "node", "name") ++ mock_pull.assert_called_once_with( ++ self.mock_com, NodeAddresses("node"), "name" ++ ) ++ self.mock_env.booth.create_config.called_once_with("config", True) ++ self.assertEqual(0, self.mock_env.booth.set_key_path.call_count) ++ self.assertEqual(0, self.mock_env.booth.create_key.call_count) ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, 
++ [ ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE, ++ { ++ "node": "node", ++ "config": "name" ++ } ++ ), ++ ( ++ Severities.INFO, ++ report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ { ++ "node": None, ++ "name": "name", ++ "name_list": ["name"] ++ } ++ ) ++ ] ++ ) ++ ++ def test_invalid_input(self, mock_pull): ++ mock_pull.return_value = {} ++ assert_raise_library_error( ++ lambda: commands.pull_config(self.mock_env, "node", "name"), ++ ( ++ Severities.ERROR, ++ report_codes.INVALID_RESPONSE_FORMAT, ++ {"node": "node"} ++ ) ++ ) ++ mock_pull.assert_called_once_with( ++ self.mock_com, NodeAddresses("node"), "name" ++ ) ++ self.assertEqual(0, self.mock_env.booth.create_config.call_count) ++ self.assertEqual(0, self.mock_env.booth.set_key_path.call_count) ++ self.assertEqual(0, self.mock_env.booth.create_key.call_count) ++ assert_report_item_list_equal( ++ self.mock_rep.report_item_list, ++ [( ++ Severities.INFO, ++ report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE, ++ { ++ "node": "node", ++ "config": "name" ++ } ++ )] ++ ) ++ ++class TicketOperationTest(TestCase): ++ @mock.patch("pcs.lib.booth.resource.find_bound_ip") ++ def test_raises_when_implicit_site_not_found_in_cib( ++ self, mock_find_bound_ip ++ ): ++ mock_find_bound_ip.return_value = [] ++ assert_raise_library_error( ++ lambda: commands.ticket_operation( ++ "grant", mock.Mock(), "booth", "ABC", site_ip=None ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP, ++ {} ++ ), ++ ) ++ ++ def test_raises_when_command_fail(self): ++ mock_run = mock.Mock(return_value=("some message", 1)) ++ mock_env = mock.MagicMock( ++ cmd_runner=mock.Mock(return_value=mock.MagicMock(run=mock_run)) ++ ) ++ assert_raise_library_error( ++ lambda: commands.ticket_operation( ++ "grant", mock_env, "booth", "ABC", site_ip="1.2.3.4" ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_TICKET_OPERATION_FAILED, ++ { ++ "operation": "grant", ++ "reason": "some message", ++ "site_ip": 
"1.2.3.4", ++ "ticket_name": "ABC", ++ } ++ ), ++ ) ++ ++class CreateInClusterTest(TestCase): ++ @patch_commands("get_resources", mock.MagicMock()) ++ def test_raises_when_is_created_already(self): ++ assert_raise_library_error( ++ lambda: commands.create_in_cluster( ++ mock.MagicMock(), "somename", ip="1.2.3.4", resource_create=None ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_ALREADY_IN_CIB, ++ { ++ "name": "somename", ++ } ++ ), ++ ) ++ ++class RemoveFromClusterTest(TestCase): ++ @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock( ++ side_effect=booth_resource.BoothNotFoundInCib() ++ ))) ++ def test_raises_when_no_booth_resource_found(self): ++ assert_raise_library_error( ++ lambda: commands.remove_from_cluster( ++ mock.MagicMock(), "somename", resource_remove=None ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_NOT_EXISTS_IN_CIB, ++ { ++ 'name': 'somename', ++ } ++ ), ++ ) ++ ++ @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock( ++ side_effect=booth_resource.BoothMultipleOccurenceFoundInCib() ++ ))) ++ def test_raises_when_multiple_booth_resource_found(self): ++ assert_raise_library_error( ++ lambda: commands.remove_from_cluster( ++ mock.MagicMock(), "somename", resource_remove=None ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB, ++ { ++ 'name': 'somename', ++ }, ++ report_codes.FORCE_BOOTH_REMOVE_FROM_CIB, ++ ), ++ ) ++ ++ @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock( ++ return_value=2 ++ ))) ++ def test_warn_when_multiple_booth_resources_removed(self): ++ report_processor=MockLibraryReportProcessor() ++ commands.remove_from_cluster( ++ mock.MagicMock(report_processor=report_processor), ++ "somename", ++ resource_remove=None ++ ) ++ assert_report_item_list_equal(report_processor.report_item_list, [( ++ Severities.WARNING, ++ report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB, ++ { ++ 'name': 'somename', ++ }, ++ )]) +diff --git 
a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py +index 751001b..d8b8a5f 100644 +--- a/pcs/lib/commands/test/test_ticket.py ++++ b/pcs/lib/commands/test/test_ticket.py +@@ -5,27 +5,22 @@ from __future__ import ( + unicode_literals, + ) + +-import logging + from unittest import TestCase + + from pcs.common import report_codes + from pcs.lib.commands.constraint import ticket as ticket_command +-from pcs.lib.env import LibraryEnvironment as Env + from pcs.lib.errors import ReportItemSeverity as severities ++from pcs.lib.test.misc import get_mocked_env + from pcs.test.tools.assertions import ( + assert_xml_equal, + assert_raise_library_error + ) +-from pcs.test.tools.custom_mock import MockLibraryReportProcessor + from pcs.test.tools.misc import get_test_resource as rc +-from pcs.test.tools.pcs_mock import mock + from pcs.test.tools.xml import get_xml_manipulation_creator_from_file + + + class CreateTest(TestCase): + def setUp(self): +- self.mock_logger = mock.MagicMock(logging.Logger) +- self.mock_reporter = MockLibraryReportProcessor() + self.create_cib = get_xml_manipulation_creator_from_file( + rc("cib-empty.xml") + ) +@@ -37,7 +32,7 @@ class CreateTest(TestCase): + .append_to_first_tag_name('resources', resource_xml) + ) + +- env = Env(self.mock_logger, self.mock_reporter, cib_data=str(cib)) ++ env = get_mocked_env(cib_data=str(cib)) + ticket_command.create(env, "ticketA", "resourceA", { + "loss-policy": "fence", + "rsc-role": "master" +@@ -59,11 +54,7 @@ class CreateTest(TestCase): + ) + + def test_refuse_for_nonexisting_resource(self): +- env = Env( +- self.mock_logger, +- self.mock_reporter, +- cib_data=str(self.create_cib()) +- ) ++ env = get_mocked_env(cib_data=str(self.create_cib())) + assert_raise_library_error( + lambda: ticket_command.create( + env, "ticketA", "resourceA", "master", {"loss-policy": "fence"} +diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py +index b49b9f6..1e68c31 100644 +--- 
a/pcs/lib/corosync/live.py ++++ b/pcs/lib/corosync/live.py +@@ -22,6 +22,9 @@ def get_local_corosync_conf(): + except IOError as e: + raise LibraryError(reports.corosync_config_read_error(path, e.strerror)) + ++def exists_local_corosync_conf(): ++ return os.path.exists(settings.corosync_conf_file) ++ + def set_remote_corosync_conf(node_communicator, node_addr, config_text): + """ + Send corosync.conf to a node +diff --git a/pcs/lib/env.py b/pcs/lib/env.py +index 24e4252..b139c58 100644 +--- a/pcs/lib/env.py ++++ b/pcs/lib/env.py +@@ -5,20 +5,27 @@ from __future__ import ( + unicode_literals, + ) + ++import os.path ++ + from lxml import etree + ++from pcs import settings + from pcs.lib import reports ++from pcs.lib.booth.env import BoothEnv ++from pcs.lib.cib.tools import ensure_cib_version ++from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade ++from pcs.lib.corosync.live import ( ++ exists_local_corosync_conf, ++ get_local_corosync_conf, ++ reload_config as reload_corosync_config, ++) + from pcs.lib.external import ( + is_cman_cluster, + is_service_running, + CommandRunner, + NodeCommunicator, + ) +-from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade +-from pcs.lib.corosync.live import ( +- get_local_corosync_conf, +- reload_config as reload_corosync_config, +-) ++from pcs.lib.errors import LibraryError + from pcs.lib.nodes_task import ( + distribute_corosync_conf, + check_corosync_offline_on_nodes, +@@ -29,7 +36,6 @@ from pcs.lib.pacemaker import ( + get_cib_xml, + replace_cib_configuration_xml, + ) +-from pcs.lib.cib.tools import ensure_cib_version + + + class LibraryEnvironment(object): +@@ -43,6 +49,7 @@ class LibraryEnvironment(object): + user_groups=None, + cib_data=None, + corosync_conf_data=None, ++ booth=None, + auth_tokens_getter=None, + ): + self._logger = logger +@@ -51,6 +58,9 @@ class LibraryEnvironment(object): + self._user_groups = [] if user_groups is None else user_groups + self._cib_data = 
cib_data + self._corosync_conf_data = corosync_conf_data ++ self._booth = ( ++ BoothEnv(report_processor, booth) if booth is not None else None ++ ) + self._is_cman_cluster = None + # TODO tokens probably should not be inserted from outside, but we're + # postponing dealing with them, because it's not that easy to move +@@ -169,6 +179,24 @@ class LibraryEnvironment(object): + else: + self._corosync_conf_data = corosync_conf_data + ++ def is_node_in_cluster(self): ++ if self.is_cman_cluster: ++ #TODO --cluster_conf is not propagated here. So no live check not ++ #needed here. But this should not be permanently ++ return os.path.exists(settings.corosync_conf_file) ++ ++ if not self.is_corosync_conf_live: ++ raise AssertionError( ++ "Cannot check if node is in cluster with mocked corosync_conf." ++ ) ++ return exists_local_corosync_conf() ++ ++ def command_expect_live_corosync_env(self): ++ if not self.is_corosync_conf_live: ++ raise LibraryError(reports.live_environment_required([ ++ "--corosync_conf" ++ ])) ++ + @property + def is_corosync_conf_live(self): + return self._corosync_conf_data is None +@@ -195,3 +223,7 @@ class LibraryEnvironment(object): + else: + self._auth_tokens = {} + return self._auth_tokens ++ ++ @property ++ def booth(self): ++ return self._booth +diff --git a/pcs/lib/env_file.py b/pcs/lib/env_file.py +new file mode 100644 +index 0000000..e683a57 +--- /dev/null ++++ b/pcs/lib/env_file.py +@@ -0,0 +1,122 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os.path ++ ++from pcs.common import report_codes ++from pcs.common.tools import format_environment_error ++from pcs.lib import reports ++from pcs.lib.errors import ReportItemSeverity, LibraryError, LibraryEnvError ++ ++ ++class GhostFile(object): ++ is_live = False ++ def __init__(self, file_role, content=None): ++ self.__file_role = file_role ++ self.__content = content ++ self.__no_existing_file_expected = False ++ 
self.__can_overwrite_existing_file = False ++ self.__is_binary = False ++ ++ def read(self): ++ if self.__content is None: ++ raise LibraryEnvError( ++ reports.file_does_not_exist(self.__file_role) ++ ) ++ ++ return self.__content ++ ++ def remove(self, silence_no_existence): ++ raise AssertionError("Remove GhostFile is not supported.") ++ ++ def write(self, content, file_operation=None, is_binary=False): ++ """ ++ callable file_operation is there only for RealFile compatible interface ++ it has no efect ++ """ ++ self.__is_binary = is_binary ++ self.__content = content ++ ++ def assert_no_conflict_with_existing( ++ self, report_processor, can_overwrite_existing=False ++ ): ++ self.__no_existing_file_expected = True ++ self.__can_overwrite_existing_file = can_overwrite_existing ++ ++ def export(self): ++ return { ++ "content": self.__content, ++ "no_existing_file_expected": self.__no_existing_file_expected, ++ "can_overwrite_existing_file": self.__can_overwrite_existing_file, ++ "is_binary": self.__is_binary, ++ } ++ ++ ++class RealFile(object): ++ is_live = True ++ def __init__( ++ self, file_role, file_path, ++ overwrite_code=report_codes.FORCE_FILE_OVERWRITE ++ ): ++ self.__file_role = file_role ++ self.__file_path = file_path ++ self.__overwrite_code = overwrite_code ++ ++ def assert_no_conflict_with_existing( ++ self, report_processor, can_overwrite_existing=False ++ ): ++ if os.path.exists(self.__file_path): ++ report_processor.process(reports.file_already_exists( ++ self.__file_role, ++ self.__file_path, ++ ReportItemSeverity.WARNING if can_overwrite_existing ++ else ReportItemSeverity.ERROR, ++ forceable=None if can_overwrite_existing ++ else self.__overwrite_code, ++ )) ++ ++ def write(self, content, file_operation=None, is_binary=False): ++ """ ++ callable file_operation takes path and proces operation on it e.g. 
chmod ++ """ ++ mode = "wb" if is_binary else "w" ++ try: ++ with open(self.__file_path, mode) as config_file: ++ config_file.write(content) ++ if file_operation: ++ file_operation(self.__file_path) ++ except EnvironmentError as e: ++ raise self.__report_io_error(e, "write") ++ ++ def read(self): ++ try: ++ with open(self.__file_path, "r") as file: ++ return file.read() ++ except EnvironmentError as e: ++ raise self.__report_io_error(e, "read") ++ ++ def remove(self, silence_no_existence=False): ++ if os.path.exists(self.__file_path): ++ try: ++ os.remove(self.__file_path) ++ except EnvironmentError as e: ++ raise self.__report_io_error(e, "remove") ++ elif not silence_no_existence: ++ raise LibraryError(reports.file_io_error( ++ self.__file_role, ++ file_path=self.__file_path, ++ operation="remove", ++ reason="File does not exist" ++ )) ++ ++ def __report_io_error(self, e, operation): ++ return LibraryError(reports.file_io_error( ++ self.__file_role, ++ file_path=self.__file_path, ++ operation=operation, ++ reason=format_environment_error(e) ++ )) +diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py +index 9cab5e9..0a8f4fa 100644 +--- a/pcs/lib/errors.py ++++ b/pcs/lib/errors.py +@@ -8,6 +8,20 @@ from __future__ import ( + class LibraryError(Exception): + pass + ++class LibraryEnvError(LibraryError): ++ def __init__(self, *args, **kwargs): ++ super(LibraryEnvError, self).__init__(*args, **kwargs) ++ self.processed = [] ++ ++ def sign_processed(self, report): ++ self.processed.append(report) ++ ++ @property ++ def unprocessed(self): ++ return [report for report in self.args if report not in self.processed] ++ ++ ++ + class ReportItemSeverity(object): + ERROR = 'ERROR' + WARNING = 'WARNING' +diff --git a/pcs/lib/external.py b/pcs/lib/external.py +index c773e5a..25e071f 100644 +--- a/pcs/lib/external.py ++++ b/pcs/lib/external.py +@@ -59,9 +59,10 @@ from pcs import settings + + class ManageServiceError(Exception): + #pylint: disable=super-init-not-called +- def 
__init__(self, service, message=None): ++ def __init__(self, service, message=None, instance=None): + self.service = service + self.message = message ++ self.instance = instance + + class DisableServiceError(ManageServiceError): + pass +@@ -91,6 +92,22 @@ def is_dir_nonempty(path): + return len(os.listdir(path)) > 0 + + ++def _get_service_name(service, instance=None): ++ return "{0}{1}.service".format( ++ service, "" if instance is None else "@{0}".format(instance) ++ ) ++ ++def ensure_is_systemd(): ++ """ ++ Ensure if current system is systemd system. Raises Library error if not. ++ """ ++ if not is_systemctl(): ++ raise LibraryError( ++ reports.unsupported_operation_on_non_systemd_systems() ++ ) ++ ++ ++ + @simple_cache + def is_systemctl(): + """ +@@ -108,74 +125,82 @@ def is_systemctl(): + return False + + +-def disable_service(runner, service): ++def disable_service(runner, service, instance=None): + """ + Disable specified service in local system. + Raise DisableServiceError or LibraryError on failure. + + runner -- CommandRunner + service -- name of service ++ instance -- instance name, it ha no effect on not systemd systems. ++ If None no instance name will be used. + """ + if is_systemctl(): + output, retval = runner.run([ +- "systemctl", "disable", service + ".service" ++ "systemctl", "disable", _get_service_name(service, instance) + ]) + else: + if not is_service_installed(runner, service): + return + output, retval = runner.run(["chkconfig", service, "off"]) + if retval != 0: +- raise DisableServiceError(service, output.rstrip()) ++ raise DisableServiceError(service, output.rstrip(), instance) + + +-def enable_service(runner, service): ++def enable_service(runner, service, instance=None): + """ + Enable specified service in local system. + Raise EnableServiceError or LibraryError on failure. + + runner -- CommandRunner + service -- name of service ++ instance -- instance name, it ha no effect on not systemd systems. 
++ If None no instance name will be used. + """ + if is_systemctl(): + output, retval = runner.run([ +- "systemctl", "enable", service + ".service" ++ "systemctl", "enable", _get_service_name(service, instance) + ]) + else: + output, retval = runner.run(["chkconfig", service, "on"]) + if retval != 0: +- raise EnableServiceError(service, output.rstrip()) ++ raise EnableServiceError(service, output.rstrip(), instance) + + +-def start_service(runner, service): ++def start_service(runner, service, instance=None): + """ + Start specified service in local system + CommandRunner runner + string service service name ++ string instance instance name, it ha no effect on not systemd systems. ++ If None no instance name will be used. + """ + if is_systemctl(): + output, retval = runner.run([ +- "systemctl", "start", "{0}.service".format(service) ++ "systemctl", "start", _get_service_name(service, instance) + ]) + else: + output, retval = runner.run(["service", service, "start"]) + if retval != 0: +- raise StartServiceError(service, output.rstrip()) ++ raise StartServiceError(service, output.rstrip(), instance) + + +-def stop_service(runner, service): ++def stop_service(runner, service, instance=None): + """ + Stop specified service in local system + CommandRunner runner + string service service name ++ string instance instance name, it ha no effect on not systemd systems. ++ If None no instance name will be used. 
+ """ + if is_systemctl(): + output, retval = runner.run([ +- "systemctl", "stop", "{0}.service".format(service) ++ "systemctl", "stop", _get_service_name(service, instance) + ]) + else: + output, retval = runner.run(["service", service, "stop"]) + if retval != 0: +- raise StopServiceError(service, output.rstrip()) ++ raise StopServiceError(service, output.rstrip(), instance) + + + def kill_services(runner, services): +@@ -196,7 +221,7 @@ def kill_services(runner, services): + raise KillServicesError(list(services), output.rstrip()) + + +-def is_service_enabled(runner, service): ++def is_service_enabled(runner, service, instance=None): + """ + Check if specified service is enabled in local system. + +@@ -205,7 +230,7 @@ def is_service_enabled(runner, service): + """ + if is_systemctl(): + _, retval = runner.run( +- ["systemctl", "is-enabled", service + ".service"] ++ ["systemctl", "is-enabled", _get_service_name(service, instance)] + ) + else: + _, retval = runner.run(["chkconfig", service]) +@@ -213,7 +238,7 @@ def is_service_enabled(runner, service): + return retval == 0 + + +-def is_service_running(runner, service): ++def is_service_running(runner, service, instance=None): + """ + Check if specified service is currently running on local system. + +@@ -221,7 +246,11 @@ def is_service_running(runner, service): + service -- name of service + """ + if is_systemctl(): +- _, retval = runner.run(["systemctl", "is-active", service + ".service"]) ++ _, retval = runner.run([ ++ "systemctl", ++ "is-active", ++ _get_service_name(service, instance) ++ ]) + else: + _, retval = runner.run(["service", service, "status"]) + +@@ -314,6 +343,9 @@ class CommandRunner(object): + self, args, ignore_stderr=False, stdin_string=None, env_extend=None, + binary_output=False + ): ++ #Reset environment variables by empty dict is desired here. We need to ++ #get rid of defaults - we do not know the context and environment of the ++ #library. So executable must be specified with full path. 
+ env_vars = dict(env_extend) if env_extend else dict() + env_vars.update(self._env_vars) + +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index fc2670b..eac95c7 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -1153,27 +1153,37 @@ def cman_broadcast_all_rings(): + + "broadcast in only one ring" + ) + +-def service_start_started(service): ++def service_start_started(service, instance=None): + """ + system service is being started + string service service name or description ++ string instance instance of service + """ ++ if instance: ++ msg = "Starting {service}@{instance}..." ++ else: ++ msg = "Starting {service}..." + return ReportItem.info( + report_codes.SERVICE_START_STARTED, +- "Starting {service}...", ++ msg, + info={ + "service": service, ++ "instance": instance, + } + ) + +-def service_start_error(service, reason, node=None): ++def service_start_error(service, reason, node=None, instance=None): + """ + system service start failed + string service service name or description + string reason error message + string node node on which service has been requested to start ++ string instance instance of service + """ +- msg = "Unable to start {service}: {reason}" ++ if instance: ++ msg = "Unable to start {service}@{instance}: {reason}" ++ else: ++ msg = "Unable to start {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_START_ERROR, + msg if node is None else "{node}: " + msg, +@@ -1181,33 +1191,43 @@ def service_start_error(service, reason, node=None): + "service": service, + "reason": reason, + "node": node, ++ "instance": instance, + } + ) + +-def service_start_success(service, node=None): ++def service_start_success(service, node=None, instance=None): + """ + system service was started successfully + string service service name or description + string node node on which service has been requested to start ++ string instance instance of service + """ +- msg = "{service} started" ++ if instance: ++ msg = 
"{service}@{instance} started" ++ else: ++ msg = "{service} started" + return ReportItem.info( + report_codes.SERVICE_START_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, ++ "instance": instance, + } + ) + +-def service_start_skipped(service, reason, node=None): ++def service_start_skipped(service, reason, node=None, instance=None): + """ + starting system service was skipped, no error occured + string service service name or description + string reason why the start has been skipped + string node node on which service has been requested to start ++ string instance instance of service + """ +- msg = "not starting {service} - {reason}" ++ if instance: ++ msg = "not starting {service}@{instance} - {reason}" ++ else: ++ msg = "not starting {service} - {reason}" + return ReportItem.info( + report_codes.SERVICE_START_SKIPPED, + msg if node is None else "{node}: " + msg, +@@ -1215,30 +1235,41 @@ def service_start_skipped(service, reason, node=None): + "service": service, + "reason": reason, + "node": node, ++ "instance": instance, + } + ) + +-def service_stop_started(service): ++def service_stop_started(service, instance=None): + """ + system service is being stopped + string service service name or description ++ string instance instance of service + """ ++ if instance: ++ msg = "Stopping {service}@{instance}..." ++ else: ++ msg = "Stopping {service}..." 
+ return ReportItem.info( + report_codes.SERVICE_STOP_STARTED, +- "Stopping {service}...", ++ msg, + info={ + "service": service, ++ "instance": instance, + } + ) + +-def service_stop_error(service, reason, node=None): ++def service_stop_error(service, reason, node=None, instance=None): + """ + system service stop failed + string service service name or description + string reason error message + string node node on which service has been requested to stop ++ string instance instance of service + """ +- msg = "Unable to stop {service}: {reason}" ++ if instance: ++ msg = "Unable to stop {service}@{instance}: {reason}" ++ else: ++ msg = "Unable to stop {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_STOP_ERROR, + msg if node is None else "{node}: " + msg, +@@ -1246,22 +1277,28 @@ def service_stop_error(service, reason, node=None): + "service": service, + "reason": reason, + "node": node, ++ "instance": instance, + } + ) + +-def service_stop_success(service, node=None): ++def service_stop_success(service, node=None, instance=None): + """ + system service was stopped successfully + string service service name or description + string node node on which service has been requested to stop ++ string instance instance of service + """ +- msg = "{service} stopped" ++ if instance: ++ msg = "{service}@{instance} stopped" ++ else: ++ msg = "{service} stopped" + return ReportItem.info( + report_codes.SERVICE_STOP_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, ++ "instance": instance, + } + ) + +@@ -1295,27 +1332,37 @@ def service_kill_success(services): + } + ) + +-def service_enable_started(service): ++def service_enable_started(service, instance=None): + """ + system service is being enabled + string service service name or description ++ string instance instance of service + """ ++ if instance: ++ msg = "Enabling {service}@{instance}..." ++ else: ++ msg = "Enabling {service}..." 
+ return ReportItem.info( + report_codes.SERVICE_ENABLE_STARTED, +- "Enabling {service}...", ++ msg, + info={ + "service": service, ++ "instance": instance, + } + ) + +-def service_enable_error(service, reason, node=None): ++def service_enable_error(service, reason, node=None, instance=None): + """ + system service enable failed + string service service name or description + string reason error message + string node node on which service was enabled ++ string instance instance of service + """ +- msg = "Unable to enable {service}: {reason}" ++ if instance: ++ msg = "Unable to enable {service}@{instance}: {reason}" ++ else: ++ msg = "Unable to enable {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_ENABLE_ERROR, + msg if node is None else "{node}: " + msg, +@@ -1323,33 +1370,43 @@ def service_enable_error(service, reason, node=None): + "service": service, + "reason": reason, + "node": node, ++ "instance": instance, + } + ) + +-def service_enable_success(service, node=None): ++def service_enable_success(service, node=None, instance=None): + """ + system service was enabled successfully + string service service name or description + string node node on which service has been enabled ++ string instance instance of service + """ +- msg = "{service} enabled" ++ if instance: ++ msg = "{service}@{instance} enabled" ++ else: ++ msg = "{service} enabled" + return ReportItem.info( + report_codes.SERVICE_ENABLE_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, ++ "instance": instance, + } + ) + +-def service_enable_skipped(service, reason, node=None): ++def service_enable_skipped(service, reason, node=None, instance=None): + """ + enabling system service was skipped, no error occured + string service service name or description + string reason why the enabling has been skipped + string node node on which service has been requested to enable ++ string instance instance of service + """ +- msg = "not 
enabling {service} - {reason}" ++ if instance: ++ msg = "not enabling {service}@{instance} - {reason}" ++ else: ++ msg = "not enabling {service} - {reason}" + return ReportItem.info( + report_codes.SERVICE_ENABLE_SKIPPED, + msg if node is None else "{node}: " + msg, +@@ -1357,30 +1414,41 @@ def service_enable_skipped(service, reason, node=None): + "service": service, + "reason": reason, + "node": node, ++ "instance": instance + } + ) + +-def service_disable_started(service): ++def service_disable_started(service, instance=None): + """ + system service is being disabled + string service service name or description ++ string instance instance of service + """ ++ if instance: ++ msg = "Disabling {service}@{instance}..." ++ else: ++ msg = "Disabling {service}..." + return ReportItem.info( + report_codes.SERVICE_DISABLE_STARTED, +- "Disabling {service}...", ++ msg, + info={ + "service": service, ++ "instance": instance, + } + ) + +-def service_disable_error(service, reason, node=None): ++def service_disable_error(service, reason, node=None, instance=None): + """ + system service disable failed + string service service name or description + string reason error message + string node node on which service was disabled ++ string instance instance of service + """ +- msg = "Unable to disable {service}: {reason}" ++ if instance: ++ msg = "Unable to disable {service}@{instance}: {reason}" ++ else: ++ msg = "Unable to disable {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_DISABLE_ERROR, + msg if node is None else "{node}: " + msg, +@@ -1388,22 +1456,28 @@ def service_disable_error(service, reason, node=None): + "service": service, + "reason": reason, + "node": node, ++ "instance": instance, + } + ) + +-def service_disable_success(service, node=None): ++def service_disable_success(service, node=None, instance=None): + """ + system service was disabled successfully + string service service name or description + string node node on which service was 
disabled ++ string instance instance of service + """ +- msg = "{service} disabled" ++ if instance: ++ msg = "{service}@{instance} disabled" ++ else: ++ msg = "{service} disabled" + return ReportItem.info( + report_codes.SERVICE_DISABLE_SUCCESS, + msg if node is None else "{node}: " + msg, + info={ + "service": service, + "node": node, ++ "instance": instance, + } + ) + +@@ -1742,3 +1816,88 @@ def unable_to_upgrade_cib_to_required_version( + "current_version": "{0}.{1}.{2}".format(*current_version) + } + ) ++ ++def file_already_exists( ++ file_role, file_path, severity=ReportItemSeverity.ERROR, ++ forceable=None, node=None ++ ): ++ msg = "file {file_path} already exists" ++ if file_role: ++ msg = "{file_role} " + msg ++ if node: ++ msg = "{node}: " + msg ++ return ReportItem( ++ report_codes.FILE_ALREADY_EXISTS, ++ severity, ++ msg, ++ info={ ++ "file_role": file_role, ++ "file_path": file_path, ++ "node": node, ++ }, ++ forceable=forceable, ++ ) ++ ++def file_does_not_exist(file_role, file_path=""): ++ return ReportItem.error( ++ report_codes.FILE_DOES_NOT_EXIST, ++ "{file_role} file {file_path} does not exist", ++ info={ ++ "file_role": file_role, ++ "file_path": file_path, ++ }, ++ ) ++ ++def file_io_error( ++ file_role, file_path="", reason="", operation="work with", ++ severity=ReportItemSeverity.ERROR ++): ++ if file_path: ++ msg = "unable to {operation} {file_role} '{file_path}': {reason}" ++ else: ++ msg = "unable to {operation} {file_role}: {reason}" ++ return ReportItem( ++ report_codes.FILE_IO_ERROR, ++ severity, ++ msg, ++ info={ ++ "file_role": file_role, ++ "file_path": file_path, ++ "reason": reason, ++ "operation": operation ++ }, ++ ) ++ ++def unable_to_determine_user_uid(user): ++ return ReportItem.error( ++ report_codes.UNABLE_TO_DETERMINE_USER_UID, ++ "Unable to determine uid of user '{user}'", ++ info={ ++ "user": user ++ } ++ ) ++ ++def unable_to_determine_group_gid(group): ++ return ReportItem.error( ++ 
report_codes.UNABLE_TO_DETERMINE_GROUP_GID, ++ "Unable to determine gid of group '{group}'", ++ info={ ++ "group": group ++ } ++ ) ++ ++def unsupported_operation_on_non_systemd_systems(): ++ return ReportItem.error( ++ report_codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS, ++ "unsupported operation on non systemd systems" ++ ) ++ ++def live_environment_required(forbidden_options): ++ return ReportItem.error( ++ report_codes.LIVE_ENVIRONMENT_REQUIRED, ++ "This command does not support {options_string}", ++ info={ ++ "forbidden_options": forbidden_options, ++ "options_string": ", ".join(forbidden_options), ++ } ++ ) +diff --git a/pcs/lib/test/misc.py b/pcs/lib/test/misc.py +new file mode 100644 +index 0000000..1b1670a +--- /dev/null ++++ b/pcs/lib/test/misc.py +@@ -0,0 +1,20 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import logging ++ ++from pcs.lib.env import LibraryEnvironment as Env ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor ++from pcs.test.tools.pcs_mock import mock ++ ++ ++def get_mocked_env(**kwargs): ++ return Env( ++ logger=mock.MagicMock(logging.Logger), ++ report_processor=MockLibraryReportProcessor(), ++ **kwargs ++ ) +diff --git a/pcs/lib/test/test_env_file.py b/pcs/lib/test/test_env_file.py +new file mode 100644 +index 0000000..3e27af1 +--- /dev/null ++++ b/pcs/lib/test/test_env_file.py +@@ -0,0 +1,187 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from pcs.common import report_codes ++from pcs.lib.env_file import RealFile, GhostFile ++from pcs.lib.errors import ReportItemSeverity as severities ++from pcs.test.tools.assertions import( ++ assert_raise_library_error, ++ assert_report_item_list_equal ++) ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor ++from pcs.test.tools.pcs_mock import mock ++ ++ ++class GhostFileReadTest(TestCase): ++ def 
test_raises_when_trying_read_nonexistent_file(self): ++ assert_raise_library_error( ++ lambda: GhostFile("some role", content=None).read(), ++ ( ++ severities.ERROR, ++ report_codes.FILE_DOES_NOT_EXIST, ++ { ++ "file_role": "some role", ++ } ++ ), ++ ) ++ ++@mock.patch("pcs.lib.env_file.os.path.exists", return_value=True) ++class RealFileAssertNoConflictWithExistingTest(TestCase): ++ def check(self, report_processor, can_overwrite_existing=False): ++ real_file = RealFile("some role", "/etc/booth/some-name.conf") ++ real_file.assert_no_conflict_with_existing( ++ report_processor, ++ can_overwrite_existing ++ ) ++ ++ def test_success_when_config_not_exists(self, mock_exists): ++ mock_exists.return_value = False ++ report_processor=MockLibraryReportProcessor() ++ self.check(report_processor) ++ assert_report_item_list_equal(report_processor.report_item_list, []) ++ ++ def test_raises_when_config_exists_and_overwrite_not_allowed(self, mock_ex): ++ assert_raise_library_error( ++ lambda: self.check(MockLibraryReportProcessor()), ++ ( ++ severities.ERROR, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_path": "/etc/booth/some-name.conf" ++ }, ++ report_codes.FORCE_FILE_OVERWRITE, ++ ), ++ ) ++ ++ def test_warn_when_config_exists_and_overwrite_allowed(self, mock_exists): ++ report_processor=MockLibraryReportProcessor() ++ self.check(report_processor, can_overwrite_existing=True) ++ assert_report_item_list_equal(report_processor.report_item_list, [( ++ severities.WARNING, ++ report_codes.FILE_ALREADY_EXISTS, ++ { ++ "file_path": "/etc/booth/some-name.conf" ++ }, ++ )]) ++ ++class RealFileWriteTest(TestCase): ++ def test_success_write_content_to_path(self): ++ mock_open = mock.mock_open() ++ mock_file_operation = mock.Mock() ++ with mock.patch("pcs.lib.env_file.open", mock_open, create=True): ++ RealFile("some role", "/etc/booth/some-name.conf").write( ++ "config content", ++ file_operation=mock_file_operation ++ ) ++ 
mock_open.assert_called_once_with("/etc/booth/some-name.conf", "w") ++ mock_open().write.assert_called_once_with("config content") ++ mock_file_operation.assert_called_once_with( ++ "/etc/booth/some-name.conf" ++ ) ++ ++ def test_success_binary(self): ++ mock_open = mock.mock_open() ++ mock_file_operation = mock.Mock() ++ with mock.patch("pcs.lib.env_file.open", mock_open, create=True): ++ RealFile("some role", "/etc/booth/some-name.conf").write( ++ "config content".encode("utf-8"), ++ file_operation=mock_file_operation, ++ is_binary=True ++ ) ++ mock_open.assert_called_once_with("/etc/booth/some-name.conf", "wb") ++ mock_open().write.assert_called_once_with( ++ "config content".encode("utf-8") ++ ) ++ mock_file_operation.assert_called_once_with( ++ "/etc/booth/some-name.conf" ++ ) ++ ++ def test_raises_when_could_not_write(self): ++ assert_raise_library_error( ++ lambda: ++ RealFile("some role", "/no/existing/file.path").write(["content"]), ++ ( ++ severities.ERROR, ++ report_codes.FILE_IO_ERROR, ++ { ++ "reason": ++ "No such file or directory: '/no/existing/file.path'" ++ , ++ } ++ ) ++ ) ++ ++class RealFileReadTest(TestCase): ++ def test_success_read_content_from_file(self): ++ mock_open = mock.mock_open() ++ with mock.patch("pcs.lib.env_file.open", mock_open, create=True): ++ mock_open().read.return_value = "test booth\nconfig" ++ self.assertEqual( ++ "test booth\nconfig", ++ RealFile("some role", "/path/to.file").read() ++ ) ++ ++ def test_raises_when_could_not_read(self): ++ assert_raise_library_error( ++ lambda: RealFile("some role", "/no/existing/file.path").read(), ++ ( ++ severities.ERROR, ++ report_codes.FILE_IO_ERROR, ++ { ++ "reason": ++ "No such file or directory: '/no/existing/file.path'" ++ , ++ } ++ ) ++ ) ++ ++class RealFileRemoveTest(TestCase): ++ @mock.patch("pcs.lib.env_file.os.remove") ++ @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True) ++ def test_success_remove_file(self, _, mock_remove): ++ RealFile("some role", 
"/path/to.file").remove() ++ mock_remove.assert_called_once_with("/path/to.file") ++ ++ @mock.patch( ++ "pcs.lib.env_file.os.remove", ++ side_effect=EnvironmentError(1, "mock remove failed", "/path/to.file") ++ ) ++ @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True) ++ def test_raise_library_error_when_remove_failed(self, _, dummy): ++ assert_raise_library_error( ++ lambda: RealFile("some role", "/path/to.file").remove(), ++ ( ++ severities.ERROR, ++ report_codes.FILE_IO_ERROR, ++ { ++ 'reason': "mock remove failed: '/path/to.file'", ++ 'file_role': 'some role', ++ 'file_path': '/path/to.file' ++ } ++ ) ++ ) ++ ++ @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False) ++ def test_existence_is_required(self, _): ++ assert_raise_library_error( ++ lambda: RealFile("some role", "/path/to.file").remove(), ++ ( ++ severities.ERROR, ++ report_codes.FILE_IO_ERROR, ++ { ++ 'reason': "File does not exist", ++ 'file_role': 'some role', ++ 'file_path': '/path/to.file' ++ } ++ ) ++ ) ++ ++ @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False) ++ def test_noexistent_can_be_silenced(self, _): ++ RealFile("some role", "/path/to.file").remove(silence_no_existence=True) +diff --git a/pcs/lib/test/test_errors.py b/pcs/lib/test/test_errors.py +new file mode 100644 +index 0000000..2e99e19 +--- /dev/null ++++ b/pcs/lib/test/test_errors.py +@@ -0,0 +1,20 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from pcs.lib.errors import LibraryEnvError ++ ++ ++class LibraryEnvErrorTest(TestCase): ++ def test_can_sign_solved_reports(self): ++ e = LibraryEnvError("first", "second", "third") ++ for report in e.args: ++ if report == "second": ++ e.sign_processed(report) ++ ++ self.assertEqual(["first", "third"], e.unprocessed) +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 09c0235..52497a0 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -45,6 +45,9 @@ Manage quorum 
device provider on the local host. + quorum + Manage cluster quorum settings. + .TP ++booth ++Manage booth (cluster ticket manager). ++.TP + status + View cluster status. + .TP +@@ -573,6 +576,55 @@ Cancel waiting for all nodes when establishing quorum. Useful in situations whe + .TP + update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]] + Add/Change quorum options. At least one option must be specified. Options are documented in corosync's votequorum(5) man page. Requires the cluster to be stopped. ++.SS "booth" ++.TP ++setup sites <address> <address> [<address>...] [arbitrators <address> ...] [\fB\-\-force\fR] ++Write new booth configuration with specified sites and arbitrators. Total number of peers (sites and arbitrators) must be odd. When the configuration file already exists, command fails unless \fB\-\-force\fR is specified. ++.TP ++destroy ++Remove booth configuration files. ++.TP ++ticket add <ticket> ++Add new ticket to the current configuration. ++.TP ++ticket remove <ticket> ++Remove the specified ticket from the current configuration. ++.TP ++config ++Show booth configuration. ++.TP ++create ip <address> ++Make the cluster run booth service on the specified ip address as a cluster resource. Typically this is used to run booth site. ++.TP ++remove ++Remove booth resources created by the "pcs booth create" command. ++.TP ++ticket grant <ticket> [<site address>] ++Grant the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. ++.TP ++ticket revoke <ticket> [<site address>] ++Revoke the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. ++.TP ++status ++Print current status of booth on the local node. ++.TP ++pull <node> ++Pull booth configuration from the specified node. 
++.TP ++sync [\fB\-\-skip\-offline\fR] ++Send booth configuration from the local node to all nodes in the cluster. ++.TP ++enable ++Enable booth arbitrator service. ++.TP ++disable ++Disable booth arbitrator service. ++.TP ++start ++Start booth arbitrator service. ++.TP ++stop ++Stop booth arbitrator service. + .SS "status" + .TP + [status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR] +diff --git a/pcs/resource.py b/pcs/resource.py +index a85f46f..66c743c 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -60,7 +60,10 @@ def resource_cmd(argv): + argv, with_clone=True + ) + try: +- resource_create(res_id, res_type, ra_values, op_values, meta_values, clone_opts) ++ resource_create( ++ res_id, res_type, ra_values, op_values, meta_values, clone_opts, ++ group=utils.pcs_options.get("--group", None) ++ ) + except CmdLineInputError as e: + utils.exit_on_cmdline_input_errror(e, "resource", 'create') + elif (sub_cmd == "move"): +@@ -437,7 +440,10 @@ def format_desc(indent, desc): + + # Create a resource using cibadmin + # ra_class, ra_type & ra_provider must all contain valid info +-def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[]): ++def resource_create( ++ ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[], ++ group=None ++): + if "--wait" in utils.pcs_options: + wait_timeout = utils.validate_wait_get_timeout() + if "--disabled" in utils.pcs_options: +@@ -588,7 +594,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_ + + if "--clone" in utils.pcs_options or len(clone_opts) > 0: + dom, dummy_clone_id = resource_clone_create(dom, [ra_id] + clone_opts) +- if "--group" in utils.pcs_options: ++ if group: + print("Warning: --group ignored when creating a clone") + if "--master" in utils.pcs_options: + print("Warning: --master ignored when creating a clone") +@@ -596,11 +602,10 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_ + dom, dummy_master_id = 
resource_master_create( + dom, [ra_id] + master_meta_values + ) +- if "--group" in utils.pcs_options: ++ if group: + print("Warning: --group ignored when creating a master") +- elif "--group" in utils.pcs_options: +- groupname = utils.pcs_options["--group"] +- dom = resource_group_add(dom, groupname, [ra_id]) ++ elif group: ++ dom = resource_group_add(dom, group, [ra_id]) + + utils.replace_cib_configuration(dom) + +diff --git a/pcs/settings_default.py b/pcs/settings_default.py +index 15421fd..86913bf 100644 +--- a/pcs/settings_default.py ++++ b/pcs/settings_default.py +@@ -41,3 +41,5 @@ nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/" + sbd_watchdog_default = "/dev/watchdog" + sbd_config = "/etc/sysconfig/sbd" + pacemaker_wait_timeout_status = 62 ++booth_config_dir = "/etc/booth" ++booth_binary = "/usr/sbin/booth" +diff --git a/pcs/stonith.py b/pcs/stonith.py +index c02f35a..93332ef 100644 +--- a/pcs/stonith.py ++++ b/pcs/stonith.py +@@ -174,7 +174,8 @@ def stonith_create(argv): + utils.process_library_reports(e.args) + + resource.resource_create( +- stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values ++ stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values, ++ group=utils.pcs_options.get("--group", None) + ) + + def stonith_level(argv): +diff --git a/pcs/test/resources/.gitignore b/pcs/test/resources/.gitignore +index 8c710cf..b0434e7 100644 +--- a/pcs/test/resources/.gitignore ++++ b/pcs/test/resources/.gitignore +@@ -1,2 +1,3 @@ + *.tmp + temp*.xml ++temp-* +diff --git a/pcs/test/resources/tmp_keyfile b/pcs/test/resources/tmp_keyfile +new file mode 100644 +index 0000000..6b584e8 +--- /dev/null ++++ b/pcs/test/resources/tmp_keyfile +@@ -0,0 +1 @@ ++content +\ No newline at end of file +diff --git a/pcs/test/suite.py b/pcs/test/suite.py +index 5b29918..b6c7be2 100755 +--- a/pcs/test/suite.py ++++ b/pcs/test/suite.py +@@ -9,19 +9,12 @@ from __future__ import ( + import sys + import os.path + +-major, 
minor = sys.version_info[:2] +-if major == 2 and minor == 6: +- import unittest2 as unittest +-else: +- import unittest +- +- + PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__) + ))) ++sys.path.insert(0, PACKAGE_DIR) + +-def put_package_to_path(): +- sys.path.insert(0, PACKAGE_DIR) ++from pcs.test.tools import pcs_unittest as unittest + + def prepare_test_name(test_name): + """ +@@ -65,18 +58,17 @@ def discover_tests(explicitly_enumerated_tests, exclude_enumerated_tests=False): + return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests) + + def run_tests(tests, verbose=False, color=False): +- resultclass = unittest.runner.TextTestResult ++ resultclass = unittest.TextTestResult + if color: + from pcs.test.tools.color_text_runner import ColorTextTestResult + resultclass = ColorTextTestResult + +- testRunner = unittest.runner.TextTestRunner( ++ testRunner = unittest.TextTestRunner( + verbosity=2 if verbose else 1, + resultclass=resultclass + ) + return testRunner.run(tests) + +-put_package_to_path() + explicitly_enumerated_tests = [ + prepare_test_name(arg) for arg in sys.argv[1:] if arg not in ( + "-v", +diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py +index bb61600..f6ea70d 100644 +--- a/pcs/test/test_alert.py ++++ b/pcs/test/test_alert.py +@@ -7,7 +7,6 @@ from __future__ import ( + ) + + import shutil +-import sys + + from pcs.test.tools.misc import ( + get_test_resource as rc, +@@ -15,12 +14,7 @@ from pcs.test.tools.misc import ( + ) + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.pcs_runner import PcsRunner +- +-major, minor = sys.version_info[:2] +-if major == 2 and minor == 6: +- import unittest2 as unittest +-else: +- import unittest ++from pcs.test.tools import pcs_unittest as unittest + + + old_cib = rc("cib-empty.xml") +diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py +new file mode 100644 +index 0000000..5ddc06d +--- /dev/null ++++ 
b/pcs/test/test_booth.py +@@ -0,0 +1,342 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os ++import shutil ++ ++from pcs.test.tools import pcs_unittest as unittest ++from pcs.test.tools.assertions import AssertPcsMixin, console_report ++from pcs.test.tools.misc import get_test_resource as rc ++from pcs.test.tools.pcs_runner import PcsRunner ++from pcs import settings ++ ++ ++EMPTY_CIB = rc("cib-empty.xml") ++TEMP_CIB = rc("temp-cib.xml") ++ ++BOOTH_CONFIG_FILE = rc("temp-booth.cfg") ++BOOTH_KEY_FILE = rc("temp-booth.key") ++ ++BOOTH_RESOURCE_AGENT_INSTALLED = "booth-site" in os.listdir( ++ os.path.join(settings.ocf_resources, "pacemaker") ++) ++need_booth_resource_agent = unittest.skipUnless( ++ BOOTH_RESOURCE_AGENT_INSTALLED, ++ "test requires resource agent ocf:pacemaker:booth-site" ++ " which is not istalled" ++) ++ ++ ++def fake_file(command): ++ return "{0} --booth-conf={1} --booth-key={2}".format( ++ command, ++ BOOTH_CONFIG_FILE, ++ BOOTH_KEY_FILE, ++ ) ++ ++def ensure_booth_config_exists(): ++ if not os.path.exists(BOOTH_CONFIG_FILE): ++ with open(BOOTH_CONFIG_FILE, "w") as config_file: ++ config_file.write("") ++ ++def ensure_booth_config_not_exists(): ++ if os.path.exists(BOOTH_CONFIG_FILE): ++ os.remove(BOOTH_CONFIG_FILE) ++ if os.path.exists(BOOTH_KEY_FILE): ++ os.remove(BOOTH_KEY_FILE) ++ ++class BoothMixin(AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(EMPTY_CIB, TEMP_CIB) ++ self.pcs_runner = PcsRunner(TEMP_CIB) ++ ++ def assert_pcs_success(self, command, *args, **kwargs): ++ return super(BoothMixin, self).assert_pcs_success( ++ fake_file(command), *args, **kwargs ++ ) ++ ++ def assert_pcs_fail(self, command, *args, **kwargs): ++ return super(BoothMixin, self).assert_pcs_fail( ++ fake_file(command), *args, **kwargs ++ ) ++ ++ def assert_pcs_fail_original(self, *args, **kwargs): ++ return super(BoothMixin, self).assert_pcs_fail(*args, **kwargs) ++ ++class 
SetupTest(BoothMixin, unittest.TestCase): ++ def test_sucess_setup_booth_config(self): ++ ensure_booth_config_not_exists() ++ self.assert_pcs_success( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" ++ ) ++ self.assert_pcs_success( ++ "booth config", ++ stdout_full=console_report( ++ "site = 1.1.1.1", ++ "site = 2.2.2.2", ++ "arbitrator = 3.3.3.3", ++ "authfile = {0}".format(BOOTH_KEY_FILE), ++ ) ++ ) ++ with open(BOOTH_KEY_FILE) as key_file: ++ self.assertEqual(64, len(key_file.read())) ++ ++ ++ def test_fail_when_config_exists_already(self): ++ ensure_booth_config_exists() ++ try: ++ self.assert_pcs_fail( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3", ++ ( ++ "Error: booth config file {0} already exists, use --force" ++ " to override\n" ++ ).format(BOOTH_CONFIG_FILE) ++ ) ++ finally: ++ if os.path.exists(BOOTH_CONFIG_FILE): ++ os.remove(BOOTH_CONFIG_FILE) ++ ++ def test_warn_when_config_file_exists_already_but_is_forced(self): ++ ensure_booth_config_exists() ++ self.assert_pcs_success( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3 --force", ++ stdout_full=[ ++ "Warning: booth config file" ++ " {0} already exists".format(BOOTH_CONFIG_FILE) ++ , ++ "Warning: booth key file" ++ " {0} already exists".format(BOOTH_KEY_FILE) ++ , ++ ] ++ ) ++ ensure_booth_config_not_exists() ++ ++ ++ def test_fail_on_multiple_reasons(self): ++ self.assert_pcs_fail( ++ "booth setup sites 1.1.1.1 arbitrators 1.1.1.1 2.2.2.2 3.3.3.3", ++ console_report( ++ "Error: lack of sites for booth configuration (need 2 at least)" ++ ": sites 1.1.1.1" ++ , ++ "Error: odd number of peers is required (entered 4 peers)", ++ "Error: duplicate address for booth configuration: 1.1.1.1", ++ ) ++ ) ++ ++ def test_refuse_partialy_mocked_environment(self): ++ self.assert_pcs_fail_original( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" ++ " --booth-conf=/some/file" #no --booth-key! 
++ , ++ "Error: With --booth-conf must be specified --booth-key as well\n" ++ ) ++ self.assert_pcs_fail_original( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" ++ " --booth-key=/some/file" #no --booth-conf! ++ , ++ "Error: With --booth-key must be specified --booth-conf as well\n" ++ ) ++ ++ def test_show_usage_when_no_site_specified(self): ++ self.assert_pcs_fail("booth setup arbitrators 3.3.3.3", stdout_start=[ ++ "", ++ "Usage: pcs booth <command>" ++ ]) ++ ++ ++class DestroyTest(BoothMixin, unittest.TestCase): ++ def test_failed_when_using_mocked_booth_env(self): ++ self.assert_pcs_fail( ++ "booth destroy", ++ "Error: This command does not support --booth-conf, --booth-key\n" ++ ) ++ ++ @need_booth_resource_agent ++ def test_failed_when_booth_in_cib(self): ++ ensure_booth_config_not_exists() ++ name = " --name=some-weird-booth-name" ++ self.assert_pcs_success( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" + name ++ ) ++ self.assert_pcs_success("booth create ip 1.1.1.1" + name) ++ self.assert_pcs_fail_original( ++ "booth destroy" + name, ++ #If there is booth@some-weird-booth-name in systemd (enabled or ++ #started) the message continue with it because destroy command works ++ #always on live environment. 
"Cleaner" solution takes more effort ++ #than what it's worth ++ stdout_start=( ++ "Error: booth instance 'some-weird-booth-name' is used in" ++ " cluster resource\n" ++ ), ++ ) ++ ++class BoothTest(unittest.TestCase, BoothMixin): ++ def setUp(self): ++ shutil.copy(EMPTY_CIB, TEMP_CIB) ++ self.pcs_runner = PcsRunner(TEMP_CIB) ++ ensure_booth_config_not_exists() ++ self.assert_pcs_success( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" ++ ) ++ ++class AddTicketTest(BoothTest): ++ def test_success_add_ticket(self): ++ self.assert_pcs_success("booth ticket add TicketA") ++ self.assert_pcs_success("booth config", stdout_full=console_report( ++ "site = 1.1.1.1", ++ "site = 2.2.2.2", ++ "arbitrator = 3.3.3.3", ++ "authfile = {0}".format(BOOTH_KEY_FILE), ++ 'ticket = "TicketA"', ++ )) ++ ++ def test_fail_on_bad_ticket_name(self): ++ self.assert_pcs_fail( ++ "booth ticket add @TicketA", ++ "Error: booth ticket name '@TicketA' is not valid, use alphanumeric" ++ " chars or dash\n" ++ ) ++ ++ def test_fail_on_duplicit_ticket_name(self): ++ self.assert_pcs_success("booth ticket add TicketA") ++ self.assert_pcs_fail( ++ "booth ticket add TicketA", ++ "Error: booth ticket name 'TicketA' already exists in configuration" ++ "\n" ++ ) ++ ++class RemoveTicketTest(BoothTest): ++ def test_success_remove_ticket(self): ++ self.assert_pcs_success("booth ticket add TicketA") ++ self.assert_pcs_success("booth config", stdout_full=console_report( ++ "site = 1.1.1.1", ++ "site = 2.2.2.2", ++ "arbitrator = 3.3.3.3", ++ "authfile = {0}".format(BOOTH_KEY_FILE), ++ 'ticket = "TicketA"', ++ )) ++ self.assert_pcs_success("booth ticket remove TicketA") ++ self.assert_pcs_success("booth config", stdout_full=console_report( ++ "site = 1.1.1.1", ++ "site = 2.2.2.2", ++ "arbitrator = 3.3.3.3", ++ "authfile = {0}".format(BOOTH_KEY_FILE), ++ )) ++ ++ def test_fail_when_ticket_does_not_exist(self): ++ self.assert_pcs_fail( ++ "booth ticket remove TicketA", ++ "Error: booth ticket name 
'TicketA' does not exist\n" ++ ) ++ ++@need_booth_resource_agent ++class CreateTest(BoothTest): ++ def test_sucessfully_create_booth_resource_group(self): ++ self.assert_pcs_success("resource show", "NO resources configured\n") ++ self.assert_pcs_success("booth create ip 192.168.122.120") ++ self.assert_pcs_success("resource show", [ ++ " Resource Group: booth-booth-group", ++ " booth-booth-ip (ocf::heartbeat:IPaddr2): Stopped", ++ " booth-booth-service (ocf::pacemaker:booth-site): Stopped", ++ ]) ++ self.assert_pcs_success("resource show booth-booth-ip", [ ++ " Resource: booth-booth-ip (class=ocf provider=heartbeat type=IPaddr2)", ++ " Attributes: ip=192.168.122.120", ++ " Operations: start interval=0s timeout=20s (booth-booth-ip-start-interval-0s)", ++ " stop interval=0s timeout=20s (booth-booth-ip-stop-interval-0s)", ++ " monitor interval=10s timeout=20s (booth-booth-ip-monitor-interval-10s)", ++ ]) ++ ++ def test_refuse_create_booth_when_config_is_already_in_use(self): ++ self.assert_pcs_success("booth create ip 192.168.122.120") ++ self.assert_pcs_fail("booth create ip 192.168.122.121", [ ++ "Error: booth instance 'booth' is already created as cluster" ++ " resource" ++ ]) ++ ++@need_booth_resource_agent ++class RemoveTest(BoothTest): ++ def test_failed_when_no_booth_configuration_created(self): ++ self.assert_pcs_success("resource show", "NO resources configured\n") ++ self.assert_pcs_fail("booth remove", [ ++ "Error: booth instance 'booth' not found in cib" ++ ]) ++ ++ def test_failed_when_multiple_booth_configuration_created(self): ++ self.assert_pcs_success("resource show", "NO resources configured\n") ++ self.assert_pcs_success("booth create ip 192.168.122.120") ++ self.assert_pcs_success( ++ "resource create some-id ocf:pacemaker:booth-site" ++ " config=/etc/booth/booth.conf" ++ ) ++ self.assert_pcs_success("resource show", [ ++ " Resource Group: booth-booth-group", ++ " booth-booth-ip (ocf::heartbeat:IPaddr2): Stopped", ++ " booth-booth-service 
(ocf::pacemaker:booth-site): Stopped", ++ " some-id (ocf::pacemaker:booth-site): Stopped", ++ ]) ++ self.assert_pcs_fail("booth remove", [ ++ "Error: found more than one booth instance 'booth' in cib, use" ++ " --force to override" ++ ]) ++ ++ ++ def test_remove_added_booth_configuration(self): ++ self.assert_pcs_success("resource show", "NO resources configured\n") ++ self.assert_pcs_success("booth create ip 192.168.122.120") ++ self.assert_pcs_success("resource show", [ ++ " Resource Group: booth-booth-group", ++ " booth-booth-ip (ocf::heartbeat:IPaddr2): Stopped", ++ " booth-booth-service (ocf::pacemaker:booth-site): Stopped", ++ ]) ++ self.assert_pcs_success("booth remove", [ ++ "Deleting Resource - booth-booth-ip", ++ "Deleting Resource (and group) - booth-booth-service", ++ ]) ++ self.assert_pcs_success("resource show", "NO resources configured\n") ++ ++ def test_fail_when_booth_is_not_currently_configured(self): ++ pass ++ ++class TicketGrantTest(BoothTest): ++ def test_failed_when_implicit_site_but_not_correct_confgiuration_in_cib( ++ self ++ ): ++ self.assert_pcs_success("booth ticket add T1") ++ #no resource in cib ++ self.assert_pcs_fail("booth ticket grant T1", [ ++ "Error: cannot determine local site ip, please specify site" ++ " parameter" ++ , ++ ]) ++ ++class TicketRevokeTest(BoothTest): ++ def test_failed_when_implicit_site_but_not_correct_confgiuration_in_cib( ++ self ++ ): ++ self.assert_pcs_success("booth ticket add T1") ++ #no resource in cib ++ self.assert_pcs_fail("booth ticket revoke T1", [ ++ "Error: cannot determine local site ip, please specify site" ++ " parameter" ++ , ++ ]) ++ ++class ConfigTest(unittest.TestCase, BoothMixin): ++ def setUp(self): ++ shutil.copy(EMPTY_CIB, TEMP_CIB) ++ self.pcs_runner = PcsRunner(TEMP_CIB) ++ def test_fail_when_config_file_do_not_exists(self): ++ ensure_booth_config_not_exists() ++ self.assert_pcs_fail( ++ "booth config", ++ "Error: Booth config file '{0}' does not exist\n".format( ++ BOOTH_CONFIG_FILE 
++ ) ++ ) +diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py +index e1f2313..10f8a96 100644 +--- a/pcs/test/test_lib_cib_tools.py ++++ b/pcs/test/test_lib_cib_tools.py +@@ -136,6 +136,27 @@ class GetConstraintsTest(CibToolsTest): + ), + ) + ++class GetResourcesTest(CibToolsTest): ++ def test_success_if_exists(self): ++ self.assertEqual( ++ "resources", ++ lib.get_resources(self.cib.tree).tag ++ ) ++ ++ def test_raise_if_missing(self): ++ for section in self.cib.tree.findall(".//configuration/resources"): ++ section.getparent().remove(section) ++ assert_raise_library_error( ++ lambda: lib.get_resources(self.cib.tree), ++ ( ++ severities.ERROR, ++ report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION, ++ { ++ "section": "configuration/resources", ++ } ++ ), ++ ) ++ + + class GetAclsTest(CibToolsTest): + def setUp(self): +diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py +index 929a50d..a4ec0f9 100644 +--- a/pcs/test/test_lib_external.py ++++ b/pcs/test/test_lib_external.py +@@ -1068,6 +1068,25 @@ class DisableServiceTest(TestCase): + lib.disable_service(self.mock_runner, self.service) + self.assertEqual(self.mock_runner.run.call_count, 0) + ++ def test_instance_systemctl(self, mock_systemctl): ++ mock_systemctl.return_value = True ++ self.mock_runner.run.return_value = ("", 0) ++ lib.disable_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with([ ++ "systemctl", ++ "disable", ++ "{0}@{1}.service".format(self.service, "test") ++ ]) ++ ++ @mock.patch("pcs.lib.external.is_service_installed") ++ def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl): ++ mock_is_installed.return_value = True ++ mock_systemctl.return_value = False ++ self.mock_runner.run.return_value = ("", 0) ++ lib.disable_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with( ++ ["chkconfig", self.service, "off"] ++ ) + + 
@mock.patch("pcs.lib.external.is_systemctl") + class EnableServiceTest(TestCase): +@@ -1113,6 +1132,24 @@ class EnableServiceTest(TestCase): + ["chkconfig", self.service, "on"] + ) + ++ def test_instance_systemctl(self, mock_systemctl): ++ mock_systemctl.return_value = True ++ self.mock_runner.run.return_value = ("", 0) ++ lib.enable_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with([ ++ "systemctl", ++ "enable", ++ "{0}@{1}.service".format(self.service, "test") ++ ]) ++ ++ def test_instance_not_systemctl(self, mock_systemctl): ++ mock_systemctl.return_value = False ++ self.mock_runner.run.return_value = ("", 0) ++ lib.enable_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with( ++ ["chkconfig", self.service, "on"] ++ ) ++ + + @mock.patch("pcs.lib.external.is_systemctl") + class StartServiceTest(TestCase): +@@ -1158,6 +1195,22 @@ class StartServiceTest(TestCase): + ["service", self.service, "start"] + ) + ++ def test_instance_systemctl(self, mock_systemctl): ++ mock_systemctl.return_value = True ++ self.mock_runner.run.return_value = ("", 0) ++ lib.start_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with([ ++ "systemctl", "start", "{0}@{1}.service".format(self.service, "test") ++ ]) ++ ++ def test_instance_not_systemctl(self, mock_systemctl): ++ mock_systemctl.return_value = False ++ self.mock_runner.run.return_value = ("", 0) ++ lib.start_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with( ++ ["service", self.service, "start"] ++ ) ++ + + @mock.patch("pcs.lib.external.is_systemctl") + class StopServiceTest(TestCase): +@@ -1203,6 +1256,22 @@ class StopServiceTest(TestCase): + ["service", self.service, "stop"] + ) + ++ def test_instance_systemctl(self, mock_systemctl): ++ mock_systemctl.return_value = True ++ self.mock_runner.run.return_value = ("", 0) ++ 
lib.stop_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with([ ++ "systemctl", "stop", "{0}@{1}.service".format(self.service, "test") ++ ]) ++ ++ def test_instance_not_systemctl(self, mock_systemctl): ++ mock_systemctl.return_value = False ++ self.mock_runner.run.return_value = ("", 0) ++ lib.stop_service(self.mock_runner, self.service, instance="test") ++ self.mock_runner.run.assert_called_once_with( ++ ["service", self.service, "stop"] ++ ) ++ + + class KillServicesTest(TestCase): + def setUp(self): +@@ -1470,3 +1539,20 @@ pacemaker 0:off 1:off 2:off 3:off 4:off 5:off 6:off + self.assertEqual(lib.get_non_systemd_services(self.mock_runner), []) + self.assertEqual(mock_is_systemctl.call_count, 1) + self.assertEqual(self.mock_runner.call_count, 0) ++ ++@mock.patch("pcs.lib.external.is_systemctl") ++class EnsureIsSystemctlTest(TestCase): ++ def test_systemd(self, mock_is_systemctl): ++ mock_is_systemctl.return_value = True ++ lib.ensure_is_systemd() ++ ++ def test_not_systemd(self, mock_is_systemctl): ++ mock_is_systemctl.return_value = False ++ assert_raise_library_error( ++ lib.ensure_is_systemd, ++ ( ++ severity.ERROR, ++ report_codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS, ++ {} ++ ) ++ ) +diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py +index 78a0787..b8383f6 100644 +--- a/pcs/test/tools/color_text_runner.py ++++ b/pcs/test/tools/color_text_runner.py +@@ -5,12 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-import sys +-major, minor = sys.version_info[:2] +-if major == 2 and minor == 6: +- import unittest2 as unittest +-else: +- import unittest ++from pcs.test.tools import pcs_unittest as unittest + + + palete = { +@@ -37,7 +32,7 @@ palete = { + def apply(key_list, text): + return("".join([palete[key] for key in key_list]) + text + palete["end"]) + +-TextTestResult = unittest.runner.TextTestResult ++TextTestResult = unittest.TextTestResult + #pylint: 
disable=bad-super-call + class ColorTextTestResult(TextTestResult): + def addSuccess(self, test): +diff --git a/pcs/test/tools/pcs_unittest.py b/pcs/test/tools/pcs_unittest.py +new file mode 100644 +index 0000000..4a3205d +--- /dev/null ++++ b/pcs/test/tools/pcs_unittest.py +@@ -0,0 +1,7 @@ ++import sys ++major, minor = sys.version_info[:2] ++if major == 2 and minor == 6: ++ from unittest2 import * ++else: ++ from unittest import * ++del major, minor, sys +diff --git a/pcs/usage.py b/pcs/usage.py +index ef60b64..baa70d0 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -21,6 +21,7 @@ def full_usage(): + out += strip_extras(acl([],False)) + out += strip_extras(qdevice([],False)) + out += strip_extras(quorum([],False)) ++ out += strip_extras(booth([],False)) + out += strip_extras(status([],False)) + out += strip_extras(config([],False)) + out += strip_extras(pcsd([],False)) +@@ -167,6 +168,7 @@ Commands: + acl Set pacemaker access control lists. + qdevice Manage quorum device provider. + quorum Manage cluster quorum settings. ++ booth Manage booth (cluster ticket manager). + status View cluster status. + config View and manage cluster configuration. + pcsd Manage pcs daemon. +@@ -1407,6 +1409,75 @@ Commands: + else: + return output + ++def booth(args=[], pout=True): ++ output = """ ++Usage: pcs booth <command> ++Manage booth (cluster ticket manager) ++ ++Commands: ++ setup sites <address> <address> [<address>...] [arbitrators <address> ...] ++ [--force] ++ Write new booth configuration with specified sites and arbitrators. ++ Total number of peers (sites and arbitrators) must be odd. When ++ the configuration file already exists, command fails unless --force ++ is specified. ++ ++ destroy ++ Remove booth configuration files. ++ ++ ticket add <ticket> ++ Add new ticket to the current configuration. ++ ++ ticket remove <ticket> ++ Remove the specified ticket from the current configuration. ++ ++ config ++ Show booth configuration. 
++ ++ create ip <address> ++ Make the cluster run booth service on the specified ip address as ++ a cluster resource. Typically this is used to run booth site. ++ ++ remove ++ Remove booth resources created by the "pcs booth create" command. ++ ++ ticket grant <ticket> [<site address>] ++ Grant the ticket for the site specified by address. Site address which ++ has been specified with 'pcs booth create' command is used if ++ 'site address' is omitted. ++ ++ ticket revoke <ticket> [<site address>] ++ Revoke the ticket for the site specified by address. Site address which ++ has been specified with 'pcs booth create' command is used if ++ 'site address' is omitted. ++ ++ status ++ Print current status of booth on the local node. ++ ++ pull <node> ++ Pull booth configuration from the specified node. ++ ++ sync [--skip-offline] ++ Send booth configuration from the local node to all nodes ++ in the cluster. ++ ++ enable ++ Enable booth arbitrator service. ++ ++ disable ++ Disable booth arbitrator service. ++ ++ start ++ Start booth arbitrator service. ++ ++ stop ++ Stop booth arbitrator service. 
++""" ++ if pout: ++ print(sub_usage(args, output)) ++ else: ++ return output ++ + + def alert(args=[], pout=True): + output = """ +@@ -1460,6 +1531,7 @@ def show(main_usage_name, rest_usage_names): + "property": property, + "qdevice": qdevice, + "quorum": quorum, ++ "booth": booth, + "resource": resource, + "status": status, + "stonith": stonith, +diff --git a/pcs/utils.py b/pcs/utils.py +index 25274dc..8b2cf7c 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -25,35 +25,6 @@ import base64 + import threading + import logging + +-try: +- # python2 +- from urllib import urlencode as urllib_urlencode +-except ImportError: +- # python3 +- from urllib.parse import urlencode as urllib_urlencode +-try: +- # python2 +- from urllib2 import ( +- build_opener as urllib_build_opener, +- install_opener as urllib_install_opener, +- HTTPCookieProcessor as urllib_HTTPCookieProcessor, +- HTTPSHandler as urllib_HTTPSHandler, +- HTTPError as urllib_HTTPError, +- URLError as urllib_URLError +- ) +-except ImportError: +- # python3 +- from urllib.request import ( +- build_opener as urllib_build_opener, +- install_opener as urllib_install_opener, +- HTTPCookieProcessor as urllib_HTTPCookieProcessor, +- HTTPSHandler as urllib_HTTPSHandler +- ) +- from urllib.error import ( +- HTTPError as urllib_HTTPError, +- URLError as urllib_URLError +- ) +- + + from pcs import settings, usage + from pcs.cli.common.reports import ( +@@ -89,6 +60,40 @@ from pcs.lib.pacemaker_values import( + from pcs.cli.common import middleware + from pcs.cli.common.env import Env + from pcs.cli.common.lib_wrapper import Library ++from pcs.cli.booth.command import DEFAULT_BOOTH_NAME ++import pcs.cli.booth.env ++ ++ ++try: ++ # python2 ++ from urllib import urlencode as urllib_urlencode ++except ImportError: ++ # python3 ++ from urllib.parse import urlencode as urllib_urlencode ++try: ++ # python2 ++ from urllib2 import ( ++ build_opener as urllib_build_opener, ++ install_opener as urllib_install_opener, ++ 
HTTPCookieProcessor as urllib_HTTPCookieProcessor, ++ HTTPSHandler as urllib_HTTPSHandler, ++ HTTPError as urllib_HTTPError, ++ URLError as urllib_URLError ++ ) ++except ImportError: ++ # python3 ++ from urllib.request import ( ++ build_opener as urllib_build_opener, ++ install_opener as urllib_install_opener, ++ HTTPCookieProcessor as urllib_HTTPCookieProcessor, ++ HTTPSHandler as urllib_HTTPSHandler ++ ) ++ from urllib.error import ( ++ HTTPError as urllib_HTTPError, ++ URLError as urllib_URLError ++ ) ++ ++ + + + PYTHON2 = sys.version[0] == "2" +@@ -2691,6 +2696,11 @@ def get_middleware_factory(): + cib=middleware.cib(usefile, get_cib, replace_cib_configuration), + corosync_conf_existing=middleware.corosync_conf_existing( + pcs_options.get("--corosync_conf", None) ++ ), ++ booth_conf=pcs.cli.booth.env.middleware_config( ++ pcs_options.get("--name", DEFAULT_BOOTH_NAME), ++ pcs_options.get("--booth-conf", None), ++ pcs_options.get("--booth-key", None), + ) + ) + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 553a20c..d46cd62 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -8,6 +8,7 @@ require 'net/https' + require 'json' + require 'fileutils' + require 'backports' ++require 'base64' + + require 'config.rb' + require 'cfgsync.rb' +@@ -19,6 +20,9 @@ require 'auth.rb' + class NotImplementedException < NotImplementedError + end + ++class InvalidFileNameException < NameError ++end ++ + def getAllSettings(auth_user, cib_dom=nil) + unless cib_dom + cib_dom = get_cib_dom(auth_user) +@@ -1357,10 +1361,10 @@ def pcsd_restart_nodes(auth_user, nodes) + } + end + +-def write_file_lock(path, perm, data) ++def write_file_lock(path, perm, data, binary=false) ++ file = nil + begin +- file = nil +- file = File.open(path, 'w', perm) ++ file = File.open(path, binary ? 
'wb' : 'w', perm) + file.flock(File::LOCK_EX) + file.write(data) + rescue => e +@@ -1374,6 +1378,23 @@ def write_file_lock(path, perm, data) + end + end + ++def read_file_lock(path, binary=false) ++ file = nil ++ begin ++ file = File.open(path, binary ? 'rb' : 'r') ++ file.flock(File::LOCK_SH) ++ return file.read() ++ rescue => e ++ $logger.error("Cannot read file '#{path}': #{e.message}") ++ raise ++ ensure ++ unless file.nil? ++ file.flock(File::LOCK_UN) ++ file.close() ++ end ++ end ++end ++ + def verify_cert_key_pair(cert, key) + errors = [] + cert_modulus = nil +@@ -2028,3 +2049,52 @@ def get_parsed_local_sbd_config() + return nil + end + end ++ ++def write_booth_config(config, data) ++ if config.include?('/') ++ raise InvalidFileNameException.new(config) ++ end ++ write_file_lock(File.join(BOOTH_CONFIG_DIR, config), nil, data) ++end ++ ++def read_booth_config(config) ++ if config.include?('/') ++ raise InvalidFileNameException.new(config) ++ end ++ config_path = File.join(BOOTH_CONFIG_DIR, config) ++ unless File.file?(config_path) ++ return nil ++ end ++ return read_file_lock(config_path) ++end ++ ++def write_booth_authfile(filename, data) ++ if filename.include?('/') ++ raise InvalidFileNameException.new(filename) ++ end ++ write_file_lock( ++ File.join(BOOTH_CONFIG_DIR, filename), 0600, Base64.decode64(data), true ++ ) ++end ++ ++def read_booth_authfile(filename) ++ if filename.include?('/') ++ raise InvalidFileNameException.new(filename) ++ end ++ return Base64.strict_encode64( ++ read_file_lock(File.join(BOOTH_CONFIG_DIR, filename), true) ++ ) ++end ++ ++def get_authfile_from_booth_config(config_data) ++ authfile_path = nil ++ config_data.split("\n").each {|line| ++ if line.include?('=') ++ parts = line.split('=', 2) ++ if parts[0].strip == 'authfile' ++ authfile_path = parts[1].strip ++ end ++ end ++ } ++ return authfile_path ++end +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index ebf425c..134ac5d 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ 
-83,6 +83,10 @@ def remote(params, request, auth_user) + :qdevice_client_disable => method(:qdevice_client_disable), + :qdevice_client_start => method(:qdevice_client_start), + :qdevice_client_stop => method(:qdevice_client_stop), ++ :booth_set_config => method(:booth_set_config), ++ :booth_save_files => method(:booth_save_files), ++ :booth_get_config => method(:booth_get_config), ++ + } + remote_cmd_with_pacemaker = { + :pacemaker_node_status => method(:remote_pacemaker_node_status), +@@ -2677,3 +2681,143 @@ def unmanage_resource(param, request, auth_user) + return [400, 'Invalid input data format'] + end + end ++ ++def booth_set_config(params, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ begin ++ unless params[:data_json] ++ return [400, "Missing required parameter 'data_json'"] ++ end ++ data = JSON.parse(params[:data_json], {:symbolize_names => true}) ++ rescue JSON::ParserError ++ return [400, 'Invalid input data format'] ++ end ++ config = data[:config] ++ authfile = data[:authfile] ++ return [400, 'Invalid input data format'] unless ( ++ config and config[:name] and config[:data] ++ ) ++ return [400, 'Invalid input data format'] if ( ++ authfile and (not authfile[:name] or not authfile[:data]) ++ ) ++ begin ++ write_booth_config(config[:name], config[:data]) ++ if authfile ++ write_booth_authfile(authfile[:name], authfile[:data]) ++ end ++ rescue InvalidFileNameException => e ++ return [400, "Invalid format of config/key file name '#{e.message}'"] ++ rescue => e ++ msg = "Unable to save booth configuration: #{e.message}" ++ $logger.error(msg) ++ return [400, msg] ++ end ++ msg = 'Booth configuration saved.' 
++ $logger.info(msg) ++ return [200, msg] ++end ++ ++def booth_save_files(params, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ begin ++ data = JSON.parse(params[:data_json], {:symbolize_names => true}) ++ data.each { |file| ++ unless file[:name] and file[:data] ++ return [400, 'Invalid input data format'] ++ end ++ if file[:name].include?('/') ++ return [400, "Invalid file name format '#{file[:name]}'"] ++ end ++ } ++ rescue JSON::ParserError, NoMethodError ++ return [400, 'Invalid input data format'] ++ end ++ rewrite_existing = ( ++ params.include?('rewrite_existing') || params.include?(:rewrite_existing) ++ ) ++ ++ conflict_files = [] ++ data.each { |file| ++ next unless File.file?(File.join(BOOTH_CONFIG_DIR, file[:name])) ++ if file[:is_authfile] ++ cur_data = read_booth_authfile(file[:name]) ++ else ++ cur_data = read_booth_config(file[:name]) ++ end ++ if cur_data != file[:data] ++ conflict_files << file[:name] ++ end ++ } ++ ++ write_failed = {} ++ saved_files = [] ++ data.each { |file| ++ next if conflict_files.include?(file[:name]) and not rewrite_existing ++ begin ++ if file[:is_authfile] ++ write_booth_authfile(file[:name], file[:data]) ++ else ++ write_booth_config(file[:name], file[:data]) ++ end ++ saved_files << file[:name] ++ rescue => e ++ msg = "Unable to save file (#{file[:name]}): #{e.message}" ++ $logger.error(msg) ++ write_failed[file[:name]] = e ++ end ++ } ++ return [200, JSON.generate({ ++ :existing => conflict_files, ++ :saved => saved_files, ++ :failed => write_failed ++ })] ++end ++ ++def booth_get_config(params, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::READ) ++ return 403, 'Permission denied' ++ end ++ name = params[:name] ++ if name ++ config_file_name = "#{name}.conf" ++ else ++ config_file_name = 'booth.conf' ++ end ++ if config_file_name.include?('/') ++ return [400, 'Invalid name of booth configuration'] ++ 
end ++ begin ++ config_data = read_booth_config(config_file_name) ++ unless config_data ++ return [400, "Config doesn't exist"] ++ end ++ authfile_name = nil ++ authfile_data = nil ++ authfile_path = get_authfile_from_booth_config(config_data) ++ if authfile_path ++ if File.dirname(authfile_path) != BOOTH_CONFIG_DIR ++ return [ ++ 400, "Authfile of specified config is not in '#{BOOTH_CONFIG_DIR}'" ++ ] ++ end ++ authfile_name = File.basename(authfile_path) ++ authfile_data = read_booth_authfile(authfile_name) ++ end ++ return [200, JSON.generate({ ++ :config => { ++ :name => config_file_name, ++ :data => config_data ++ }, ++ :authfile => { ++ :name => authfile_name, ++ :data => authfile_data ++ } ++ })] ++ rescue => e ++ return [400, "Unable to read booth config/key file: #{e.message}"] ++ end ++end +diff --git a/pcsd/settings.rb b/pcsd/settings.rb +index 51f00ac..e702585 100644 +--- a/pcsd/settings.rb ++++ b/pcsd/settings.rb +@@ -20,6 +20,7 @@ PACEMAKERD = "/usr/sbin/pacemakerd" + CIBADMIN = "/usr/sbin/cibadmin" + SBD_CONFIG = '/etc/sysconfig/sbd' + CIB_PATH='/var/lib/pacemaker/cib/cib.xml' ++BOOTH_CONFIG_DIR='/etc/booth' + + COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb" + COROSYNC_QDEVICE_NET_SERVER_CA_FILE = ( +-- +1.8.3.1 + diff --git a/SOURCES/bz1308514-02-booth-support-improvements.patch b/SOURCES/bz1308514-02-booth-support-improvements.patch new file mode 100644 index 0000000..9cb5f3c --- /dev/null +++ b/SOURCES/bz1308514-02-booth-support-improvements.patch @@ -0,0 +1,1904 @@ +From 798a8ab276fb816c3d9cfa5ba0a8ed55a3ed6cd2 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Mon, 29 Aug 2016 15:14:25 +0200 +Subject: [PATCH] squash bz1308514 Wider support for booth configura + +50fb38db5e26 append a new line at the end of the booth config + +a03b98c0f9e1 add bash completion for booth + +52b97fa9ef32 clean up ip resource if creating booth res. 
fails + +3d0e698a83fc fix allow force remove multiple booth resources + +1ac88efab2cd refactor booth remove + +6b41c5cc1661 add booth restart command + +706c6f32f172 fix usage: ticket grant/revoke not for arbitrator + +26fa2a241227 complete man (stayed behind usage) + +75f8da852641 modify exchange format of booth config + +ffe6ec7ea8d2 show all booth config lines including unsupported + +8722fb7ede2e add support for options during add booth ticket + +50eb49a4350b fix naming of booth reportes and report codes according to convetions + +6dfb7c82d802 simplify booth config distribution reports + +116a7a311cd7 fix adding node to cluster when booth is not installed + +23abb122e2d1 fix getting a list of existing booth config files + +ebd9fc496e24 display booth config as it is (plain, not parsed) + +0183814dd57a add ability to display booth config from a remote node +--- + pcs/booth.py | 6 +- + pcs/cli/booth/command.py | 72 ++++++++++-------- + pcs/cli/booth/test/test_command.py | 21 ++++-- + pcs/cli/common/lib_wrapper.py | 3 +- + pcs/common/report_codes.py | 9 +-- + pcs/lib/booth/config_exchange.py | 47 ++++-------- + pcs/lib/booth/config_files.py | 15 +++- + pcs/lib/booth/config_parser.py | 3 +- + pcs/lib/booth/config_structure.py | 35 ++++++++- + pcs/lib/booth/reports.py | 97 +++++++++++++----------- + pcs/lib/booth/resource.py | 49 ++++-------- + pcs/lib/booth/sync.py | 12 +-- + pcs/lib/booth/test/test_config_exchange.py | 56 ++++++-------- + pcs/lib/booth/test/test_config_files.py | 32 ++++++-- + pcs/lib/booth/test/test_config_parser.py | 2 + + pcs/lib/booth/test/test_config_structure.py | 66 ++++++++++++++++- + pcs/lib/booth/test/test_resource.py | 111 ++++++++++++---------------- + pcs/lib/booth/test/test_sync.py | 56 +++++++------- + pcs/lib/commands/booth.py | 88 ++++++++++++++-------- + pcs/lib/commands/test/test_booth.py | 50 +++++++------ + pcs/pcs.8 | 11 ++- + pcs/test/test_booth.py | 77 +++++++++++++++++-- + pcs/usage.py | 13 +++- + 23 files changed, 564 
insertions(+), 367 deletions(-) + +diff --git a/pcs/booth.py b/pcs/booth.py +index 764dcd8..5ec41bf 100644 +--- a/pcs/booth.py ++++ b/pcs/booth.py +@@ -12,7 +12,7 @@ from pcs import utils + from pcs.cli.booth import command + from pcs.cli.common.errors import CmdLineInputError + from pcs.lib.errors import LibraryError +-from pcs.resource import resource_create, resource_remove ++from pcs.resource import resource_create, resource_remove, resource_restart + + + def booth_cmd(lib, argv, modifiers): +@@ -47,13 +47,15 @@ def booth_cmd(lib, argv, modifiers): + else: + raise CmdLineInputError() + elif sub_cmd == "create": +- command.get_create_in_cluster(resource_create)( ++ command.get_create_in_cluster(resource_create, resource_remove)( + lib, argv_next, modifiers + ) + elif sub_cmd == "remove": + command.get_remove_from_cluster(resource_remove)( + lib, argv_next, modifiers + ) ++ elif sub_cmd == "restart": ++ command.get_restart(resource_restart)(lib, argv_next, modifiers) + elif sub_cmd == "sync": + command.sync(lib, argv_next, modifiers) + elif sub_cmd == "pull": +diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py +index bea6582..0b71a01 100644 +--- a/pcs/cli/booth/command.py ++++ b/pcs/cli/booth/command.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + from pcs.cli.common.errors import CmdLineInputError +-from pcs.cli.common.parse_args import group_by_keywords ++from pcs.cli.common.parse_args import group_by_keywords, prepare_options + + + DEFAULT_BOOTH_NAME = "booth" +@@ -18,15 +18,25 @@ def config_setup(lib, arg_list, modifiers): + """ + create booth config + """ +- booth_configuration = group_by_keywords( ++ peers = group_by_keywords( + arg_list, + set(["sites", "arbitrators"]), + keyword_repeat_allowed=False + ) +- if "sites" not in booth_configuration or not booth_configuration["sites"]: ++ if "sites" not in peers or not peers["sites"]: + raise CmdLineInputError() + +- lib.booth.config_setup(booth_configuration, modifiers["force"]) ++ 
booth_config = [] ++ for site in peers["sites"]: ++ booth_config.append({"key": "site", "value": site, "details": []}) ++ for arbitrator in peers["arbitrators"]: ++ booth_config.append({ ++ "key": "arbitrator", ++ "value": arbitrator, ++ "details": [], ++ }) ++ ++ lib.booth.config_setup(booth_config, modifiers["force"]) + + def config_destroy(lib, arg_list, modifiers): + """ +@@ -41,36 +51,20 @@ def config_show(lib, arg_list, modifiers): + """ + print booth config + """ +- booth_configuration = lib.booth.config_show() +- authfile_lines = [] +- if booth_configuration["authfile"]: +- authfile_lines.append( +- "authfile = {0}".format(booth_configuration["authfile"]) +- ) ++ if len(arg_list) > 1: ++ raise CmdLineInputError() ++ node = None if not arg_list else arg_list[0] ++ ++ print(lib.booth.config_text(DEFAULT_BOOTH_NAME, node), end="") + +- line_list = ( +- ["site = {0}".format(site) for site in booth_configuration["sites"]] +- + +- [ +- "arbitrator = {0}".format(arbitrator) +- for arbitrator in booth_configuration["arbitrators"] +- ] +- + authfile_lines + +- [ +- 'ticket = "{0}"'.format(ticket) +- for ticket in booth_configuration["tickets"] +- ] +- ) +- for line in line_list: +- print(line) + + def config_ticket_add(lib, arg_list, modifiers): + """ + add ticket to current configuration + """ +- if len(arg_list) != 1: ++ if not arg_list: + raise CmdLineInputError +- lib.booth.config_ticket_add(arg_list[0]) ++ lib.booth.config_ticket_add(arg_list[0], prepare_options(arg_list[1:])) + + def config_ticket_remove(lib, arg_list, modifiers): + """ +@@ -96,7 +90,7 @@ def ticket_revoke(lib, arg_list, modifiers): + def ticket_grant(lib, arg_list, modifiers): + ticket_operation(lib.booth.ticket_grant, arg_list, modifiers) + +-def get_create_in_cluster(resource_create): ++def get_create_in_cluster(resource_create, resource_remove): + #TODO resource_remove is provisional hack until resources are not moved to + #lib + def create_in_cluster(lib, arg_list, modifiers): +@@ -108,6 
+102,7 @@ def get_create_in_cluster(resource_create): + __get_name(modifiers), + ip, + resource_create, ++ resource_remove, + ) + return create_in_cluster + +@@ -118,10 +113,28 @@ def get_remove_from_cluster(resource_remove): + if arg_list: + raise CmdLineInputError() + +- lib.booth.remove_from_cluster(__get_name(modifiers), resource_remove) ++ lib.booth.remove_from_cluster( ++ __get_name(modifiers), ++ resource_remove, ++ modifiers["force"], ++ ) + + return remove_from_cluster + ++def get_restart(resource_restart): ++ #TODO resource_restart is provisional hack until resources are not moved to ++ #lib ++ def restart(lib, arg_list, modifiers): ++ if arg_list: ++ raise CmdLineInputError() ++ ++ lib.booth.restart( ++ __get_name(modifiers), ++ resource_restart, ++ modifiers["force"], ++ ) ++ ++ return restart + + def sync(lib, arg_list, modifiers): + if arg_list: +@@ -175,3 +188,4 @@ def status(lib, arg_list, modifiers): + if booth_status.get("status"): + print("DAEMON STATUS:") + print(booth_status["status"]) ++ +diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py +index 00216f2..019a74f 100644 +--- a/pcs/cli/booth/test/test_command.py ++++ b/pcs/cli/booth/test/test_command.py +@@ -28,10 +28,12 @@ class ConfigSetupTest(TestCase): + } + ) + lib.booth.config_setup.assert_called_once_with( +- { +- "sites": ["1.1.1.1", "2.2.2.2", "4.4.4.4"], +- "arbitrators": ["3.3.3.3"], +- }, ++ [ ++ {"key": "site", "value": "1.1.1.1", "details": []}, ++ {"key": "site", "value": "2.2.2.2", "details": []}, ++ {"key": "site", "value": "4.4.4.4", "details": []}, ++ {"key": "arbitrator", "value": "3.3.3.3", "details": []}, ++ ], + False + ) + +@@ -40,5 +42,12 @@ class ConfigTicketAddTest(TestCase): + lib = mock.MagicMock() + lib.booth = mock.MagicMock() + lib.booth.config_ticket_add = mock.MagicMock() +- command.config_ticket_add(lib, arg_list=["TICKET_A"], modifiers={}) +- lib.booth.config_ticket_add.assert_called_once_with("TICKET_A") ++ 
command.config_ticket_add( ++ lib, ++ arg_list=["TICKET_A", "timeout=10"], ++ modifiers={} ++ ) ++ lib.booth.config_ticket_add.assert_called_once_with( ++ "TICKET_A", ++ {"timeout": "10"}, ++ ) +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index c836575..94a1311 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -209,11 +209,12 @@ def load_module(env, middleware_factory, name): + { + "config_setup": booth.config_setup, + "config_destroy": booth.config_destroy, +- "config_show": booth.config_show, ++ "config_text": booth.config_text, + "config_ticket_add": booth.config_ticket_add, + "config_ticket_remove": booth.config_ticket_remove, + "create_in_cluster": booth.create_in_cluster, + "remove_from_cluster": booth.remove_from_cluster, ++ "restart": booth.restart, + "config_sync": booth.config_sync, + "enable": booth.enable_booth, + "disable": booth.disable_booth, +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index 672c2e3..5e46a1f 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -29,16 +29,15 @@ BOOTH_ADDRESS_DUPLICATION = "BOOTH_ADDRESS_DUPLICATION" + BOOTH_ALREADY_IN_CIB = "BOOTH_ALREADY_IN_CIB" + BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP = "BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP" + BOOTH_CANNOT_IDENTIFY_KEYFILE = "BOOTH_CANNOT_IDENTIFY_KEYFILE" ++BOOTH_CONFIG_ACCEPTED_BY_NODE = "BOOTH_CONFIG_ACCEPTED_BY_NODE" ++BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR = "BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR" ++BOOTH_CONFIG_DISTRIBUTION_STARTED = "BOOTH_CONFIG_DISTRIBUTION_STARTED" + BOOTH_CONFIG_FILE_ALREADY_EXISTS = "BOOTH_CONFIG_FILE_ALREADY_EXISTS" + BOOTH_CONFIG_IO_ERROR = "BOOTH_CONFIG_IO_ERROR" + BOOTH_CONFIG_IS_USED = "BOOTH_CONFIG_IS_USED" + BOOTH_CONFIG_READ_ERROR = "BOOTH_CONFIG_READ_ERROR" +-BOOTH_CONFIG_WRITE_ERROR = "BOOTH_CONFIG_WRITE_ERROR" + BOOTH_CONFIG_UNEXPECTED_LINES = "BOOTH_CONFIG_UNEXPECTED_LINES" +-BOOTH_CONFIGS_SAVED_ON_NODE = 
"BOOTH_CONFIGS_SAVED_ON_NODE" +-BOOTH_CONFIGS_SAVING_ON_NODE = "BOOTH_CONFIGS_SAVING_ON_NODE" + BOOTH_DAEMON_STATUS_ERROR = "BOOTH_DAEMON_STATUS_ERROR" +-BOOTH_DISTRIBUTING_CONFIG = "BOOTH_DISTRIBUTING_CONFIG" + BOOTH_EVEN_PEERS_NUM = "BOOTH_EVEN_PEERS_NUM" + BOOTH_FETCHING_CONFIG_FROM_NODE = "BOOTH_FETCHING_CONFIG_FROM_NODE" + BOOTH_INVALID_CONFIG_NAME = "BOOTH_INVALID_CONFIG_NAME" +@@ -50,8 +49,8 @@ BOOTH_PEERS_STATUS_ERROR = "BOOTH_PEERS_STATUS_ERROR" + BOOTH_SKIPPING_CONFIG = "BOOTH_SKIPPING_CONFIG" + BOOTH_TICKET_DOES_NOT_EXIST = "BOOTH_TICKET_DOES_NOT_EXIST" + BOOTH_TICKET_DUPLICATE = "BOOTH_TICKET_DUPLICATE" +-BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED" + BOOTH_TICKET_NAME_INVALID = "BOOTH_TICKET_NAME_INVALID" ++BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED" + BOOTH_TICKET_STATUS_ERROR = "BOOTH_TICKET_STATUS_ERROR" + BOOTH_UNSUPORTED_FILE_LOCATION = "BOOTH_UNSUPORTED_FILE_LOCATION" + CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND" +diff --git a/pcs/lib/booth/config_exchange.py b/pcs/lib/booth/config_exchange.py +index e0569ba..377af1d 100644 +--- a/pcs/lib/booth/config_exchange.py ++++ b/pcs/lib/booth/config_exchange.py +@@ -6,38 +6,23 @@ from __future__ import ( + ) + from pcs.lib.booth.config_structure import ConfigItem + +-EXCHANGE_PRIMITIVES = ["authfile"] +-EXCHANGE_LISTS = [ +- ("site", "sites"), +- ("arbitrator", "arbitrators"), +- ("ticket", "tickets"), +-] +- +- + def to_exchange_format(booth_configuration): +- exchange_lists = dict(EXCHANGE_LISTS) +- exchange = dict( +- (exchange_key, []) for exchange_key in exchange_lists.values() +- ) +- +- for key, value, _ in booth_configuration: +- if key in exchange_lists: +- exchange[exchange_lists[key]].append(value) +- if key in EXCHANGE_PRIMITIVES: +- exchange[key] = value +- +- return exchange ++ return [ ++ { ++ "key": item.key, ++ "value": item.value, ++ "details": to_exchange_format(item.details), ++ } ++ for item in booth_configuration ++ ] + + + def 
from_exchange_format(exchange_format): +- booth_config = [] +- for key in EXCHANGE_PRIMITIVES: +- if key in exchange_format: +- booth_config.append(ConfigItem(key, exchange_format[key])) +- +- for key, exchange_key in EXCHANGE_LISTS: +- booth_config.extend([ +- ConfigItem(key, value) +- for value in exchange_format.get(exchange_key, []) +- ]) +- return booth_config ++ return [ ++ ConfigItem( ++ item["key"], ++ item["value"], ++ from_exchange_format(item["details"]), ++ ) ++ for item in exchange_format ++ ] +diff --git a/pcs/lib/booth/config_files.py b/pcs/lib/booth/config_files.py +index aaad951..7b91379 100644 +--- a/pcs/lib/booth/config_files.py ++++ b/pcs/lib/booth/config_files.py +@@ -24,10 +24,17 @@ def get_all_configs_file_names(): + Returns list of all file names ending with '.conf' in booth configuration + directory. + """ ++ if not os.path.isdir(BOOTH_CONFIG_DIR): ++ return [] + return [ +- file_name for file_name in os.listdir(BOOTH_CONFIG_DIR) +- if os.path.isfile(file_name) and file_name.endswith(".conf") and +- len(file_name) > len(".conf") ++ file_name ++ for file_name in os.listdir(BOOTH_CONFIG_DIR) ++ if ++ file_name.endswith(".conf") ++ and ++ len(file_name) > len(".conf") ++ and ++ os.path.isfile(os.path.join(BOOTH_CONFIG_DIR, file_name)) + ] + + +@@ -55,7 +62,7 @@ def read_configs(reporter, skip_wrong_config=False): + try: + output[file_name] = _read_config(file_name) + except EnvironmentError: +- report_list.append(reports.booth_config_unable_to_read( ++ report_list.append(reports.booth_config_read_error( + file_name, + ( + ReportItemSeverity.WARNING if skip_wrong_config +diff --git a/pcs/lib/booth/config_parser.py b/pcs/lib/booth/config_parser.py +index 62d2203..bdc79fd 100644 +--- a/pcs/lib/booth/config_parser.py ++++ b/pcs/lib/booth/config_parser.py +@@ -23,7 +23,8 @@ def parse(content): + ) + + def build(config_line_list): +- return "\n".join(build_to_lines(config_line_list)) ++ newline = [""] ++ return 
"\n".join(build_to_lines(config_line_list) + newline) + + def build_to_lines(config_line_list, deep=0): + line_list = [] +diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py +index c92f718..8977b7a 100644 +--- a/pcs/lib/booth/config_structure.py ++++ b/pcs/lib/booth/config_structure.py +@@ -7,6 +7,7 @@ from __future__ import ( + + import re + ++import pcs.lib.reports as common_reports + from pcs.lib.booth import reports + from pcs.lib.errors import LibraryError + from collections import namedtuple +@@ -66,6 +67,15 @@ def validate_peers(site_list, arbitrator_list): + if report: + raise LibraryError(*report) + ++def take_peers(booth_configuration): ++ return ( ++ pick_list_by_key(booth_configuration, "site"), ++ pick_list_by_key(booth_configuration, "arbitrator"), ++ ) ++ ++def pick_list_by_key(booth_configuration, key): ++ return [item.value for item in booth_configuration if item.key == key] ++ + def remove_ticket(booth_configuration, ticket_name): + validate_ticket_exists(booth_configuration, ticket_name) + return [ +@@ -73,11 +83,14 @@ def remove_ticket(booth_configuration, ticket_name): + if config_item.key != "ticket" or config_item.value != ticket_name + ] + +-def add_ticket(booth_configuration, ticket_name): ++def add_ticket(booth_configuration, ticket_name, options): + validate_ticket_name(ticket_name) + validate_ticket_unique(booth_configuration, ticket_name) ++ validate_ticket_options(options) + return booth_configuration + [ +- ConfigItem("ticket", ticket_name) ++ ConfigItem("ticket", ticket_name, [ ++ ConfigItem(key, value) for key, value in options.items() ++ ]) + ] + + def validate_ticket_exists(booth_configuration, ticket_name): +@@ -88,6 +101,24 @@ def validate_ticket_unique(booth_configuration, ticket_name): + if ticket_exists(booth_configuration, ticket_name): + raise LibraryError(reports.booth_ticket_duplicate(ticket_name)) + ++def validate_ticket_options(options): ++ reports = [] ++ for key in sorted(options): ++ if 
key in GLOBAL_KEYS: ++ reports.append( ++ common_reports.invalid_option(key, TICKET_KEYS, "booth ticket") ++ ) ++ ++ if not options[key].strip(): ++ reports.append(common_reports.invalid_option_value( ++ key, ++ options[key], ++ "no-empty", ++ )) ++ ++ if reports: ++ raise LibraryError(*reports) ++ + def ticket_exists(booth_configuration, ticket_name): + return any( + value for key, value, _ in booth_configuration +diff --git a/pcs/lib/booth/reports.py b/pcs/lib/booth/reports.py +index 8a804e0..6aa9d3d 100644 +--- a/pcs/lib/booth/reports.py ++++ b/pcs/lib/booth/reports.py +@@ -197,22 +197,17 @@ def booth_multiple_times_in_cib( + ) + + +-def booth_distributing_config(name=None): ++def booth_config_distribution_started(): + """ +- Sending booth config to all nodes in cluster. +- +- name -- name of booth instance ++ booth configuration is about to be sent to nodes + """ + return ReportItem.info( +- report_codes.BOOTH_DISTRIBUTING_CONFIG, +- "Sending booth config{0} to all cluster nodes.".format( +- " ({name})" if name and name != "booth" else "" +- ), +- info={"name": name} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ "Sending booth configuration to cluster nodes..." + ) + + +-def booth_config_saved(node=None, name_list=None): ++def booth_config_accepted_by_node(node=None, name_list=None): + """ + Booth config has been saved on specified node. + +@@ -229,7 +224,7 @@ def booth_config_saved(node=None, name_list=None): + msg = "Booth config saved." + name = None + return ReportItem.info( +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + msg if node is None else "{node}: " + msg, + info={ + "node": node, +@@ -239,30 +234,7 @@ def booth_config_saved(node=None, name_list=None): + ) + + +-def booth_config_unable_to_read( +- name, severity=ReportItemSeverity.ERROR, forceable=None +-): +- """ +- Unable to read from specified booth instance config. 
+- +- name -- name of booth instance +- severity -- severity of report item +- forceable -- is this report item forceable? by what category? +- """ +- if name and name != "booth": +- msg = "Unable to read booth config ({name})." +- else: +- msg = "Unable to read booth config." +- return ReportItem( +- report_codes.BOOTH_CONFIG_READ_ERROR, +- severity, +- msg, +- info={"name": name}, +- forceable=forceable +- ) +- +- +-def booth_config_not_saved(node, reason, name=None): ++def booth_config_distribution_node_error(node, reason, name=None): + """ + Saving booth config failed on specified node. + +@@ -275,7 +247,7 @@ def booth_config_not_saved(node, reason, name=None): + else: + msg = "Unable to save booth config on node '{node}': {reason}" + return ReportItem.error( +- report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR, + msg, + info={ + "node": node, +@@ -285,20 +257,36 @@ def booth_config_not_saved(node, reason, name=None): + ) + + +-def booth_sending_local_configs_to_node(node): ++def booth_config_read_error( ++ name, severity=ReportItemSeverity.ERROR, forceable=None ++): + """ +- Sending all local booth configs to node ++ Unable to read from specified booth instance config. + +- node -- node name ++ name -- name of booth instance ++ severity -- severity of report item ++ forceable -- is this report item forceable? by what category? + """ +- return ReportItem.info( +- report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, +- "{node}: Saving booth config(s)...", +- info={"node": node} ++ if name and name != "booth": ++ msg = "Unable to read booth config ({name})." ++ else: ++ msg = "Unable to read booth config." 
++ return ReportItem( ++ report_codes.BOOTH_CONFIG_READ_ERROR, ++ severity, ++ msg, ++ info={"name": name}, ++ forceable=forceable + ) + + +-def booth_fetching_config_from_node(node, config=None): ++def booth_fetching_config_from_node_started(node, config=None): ++ """ ++ fetching of booth config from specified node started ++ ++ node -- node from which config is fetching ++ config -- config name ++ """ + if config or config == 'booth': + msg = "Fetching booth config from node '{node}'..." + else: +@@ -314,6 +302,12 @@ def booth_fetching_config_from_node(node, config=None): + + + def booth_unsupported_file_location(file): ++ """ ++ location of booth configuration file (config, authfile) file is not ++ supported (not in /etc/booth/) ++ ++ file -- file path ++ """ + return ReportItem.warning( + report_codes.BOOTH_UNSUPORTED_FILE_LOCATION, + "skipping file {file}: unsupported file location", +@@ -322,6 +316,11 @@ def booth_unsupported_file_location(file): + + + def booth_daemon_status_error(reason): ++ """ ++ Unable to get status of booth daemon because of error. ++ ++ reason -- reason ++ """ + return ReportItem.error( + report_codes.BOOTH_DAEMON_STATUS_ERROR, + "unable to get status of booth daemon: {reason}", +@@ -330,6 +329,11 @@ def booth_daemon_status_error(reason): + + + def booth_tickets_status_error(reason=None): ++ """ ++ Unable to get status of booth tickets because of error. ++ ++ reason -- reason ++ """ + return ReportItem.error( + report_codes.BOOTH_TICKET_STATUS_ERROR, + "unable to get status of booth tickets", +@@ -340,6 +344,11 @@ def booth_tickets_status_error(reason=None): + + + def booth_peers_status_error(reason=None): ++ """ ++ Unable to get status of booth peers because of error. 
++ ++ reason -- reason ++ """ + return ReportItem.error( + report_codes.BOOTH_PEERS_STATUS_ERROR, + "unable to get status of booth peers", +diff --git a/pcs/lib/booth/resource.py b/pcs/lib/booth/resource.py +index e793713..a4b7b1e 100644 +--- a/pcs/lib/booth/resource.py ++++ b/pcs/lib/booth/resource.py +@@ -8,18 +8,12 @@ from __future__ import ( + from pcs.lib.cib.tools import find_unique_id + + +-class BoothNotFoundInCib(Exception): +- pass +- +-class BoothMultipleOccurenceFoundInCib(Exception): +- pass +- + def create_resource_id(resources_section, name, suffix): + return find_unique_id( + resources_section.getroottree(), "booth-{0}-{1}".format(name, suffix) + ) + +-def get_creator(resource_create): ++def get_creator(resource_create, resource_remove=None): + #TODO resource_create is provisional hack until resources are not moved to + #lib + def create_booth_in_cluster(ip, booth_config_file_path, create_id): +@@ -36,15 +30,18 @@ def get_creator(resource_create): + clone_opts=[], + group=group_id, + ) +- resource_create( +- ra_id=booth_id, +- ra_type="ocf:pacemaker:booth-site", +- ra_values=["config={0}".format(booth_config_file_path)], +- op_values=[], +- meta_values=[], +- clone_opts=[], +- group=group_id, +- ) ++ try: ++ resource_create( ++ ra_id=booth_id, ++ ra_type="ocf:pacemaker:booth-site", ++ ra_values=["config={0}".format(booth_config_file_path)], ++ op_values=[], ++ meta_values=[], ++ clone_opts=[], ++ group=group_id, ++ ) ++ except SystemExit: ++ resource_remove(ip_id) + return create_booth_in_cluster + + def is_ip_resource(resource_element): +@@ -64,28 +61,12 @@ def find_grouped_ip_element_to_remove(booth_element): + return None + + def get_remover(resource_remove): +- def remove_from_cluster( +- resources_section, booth_config_file_path, remove_multiple=False +- ): +- element_list = find_for_config( +- resources_section, +- booth_config_file_path +- ) +- if not element_list: +- raise BoothNotFoundInCib() +- +- if len(element_list) > 1 and not 
remove_multiple: +- raise BoothMultipleOccurenceFoundInCib() +- +- number_of_removed_booth_elements = 0 +- for element in element_list: ++ def remove_from_cluster(booth_element_list): ++ for element in booth_element_list: + ip_resource_to_remove = find_grouped_ip_element_to_remove(element) + if ip_resource_to_remove is not None: + resource_remove(ip_resource_to_remove.attrib["id"]) + resource_remove(element.attrib["id"]) +- number_of_removed_booth_elements += 1 +- +- return number_of_removed_booth_elements + + return remove_from_cluster + +diff --git a/pcs/lib/booth/sync.py b/pcs/lib/booth/sync.py +index c9bc30b..374b96d 100644 +--- a/pcs/lib/booth/sync.py ++++ b/pcs/lib/booth/sync.py +@@ -57,7 +57,7 @@ def _set_config_on_node( + "remote/booth_set_config", + NodeCommunicator.format_data_dict([("data_json", json.dumps(data))]) + ) +- reporter.process(reports.booth_config_saved(node.label, [name])) ++ reporter.process(reports.booth_config_accepted_by_node(node.label, [name])) + + + def send_config_to_all_nodes( +@@ -77,7 +77,7 @@ def send_config_to_all_nodes( + authfile_data -- content of authfile as bytes + skip_offline -- if True offline nodes will be skipped + """ +- reporter.process(reports.booth_distributing_config(name)) ++ reporter.process(reports.booth_config_distribution_started()) + parallel_nodes_communication_helper( + _set_config_on_node, + [ +@@ -115,6 +115,9 @@ def send_all_config_to_node( + config_dict = booth_conf.read_configs(reporter, skip_wrong_config) + if not config_dict: + return ++ ++ reporter.process(reports.booth_config_distribution_started()) ++ + file_list = [] + for config, config_data in sorted(config_dict.items()): + try: +@@ -145,7 +148,6 @@ def send_all_config_to_node( + if rewrite_existing: + data.append(("rewrite_existing", "1")) + +- reporter.process(reports.booth_sending_local_configs_to_node(node.label)) + try: + response = json.loads(communicator.call_node( + node, +@@ -165,12 +167,12 @@ def send_all_config_to_node( + node.label 
+ )) + for file, reason in response["failed"].items(): +- report_list.append(reports.booth_config_not_saved( ++ report_list.append(reports.booth_config_distribution_node_error( + node.label, reason, file + )) + reporter.process_list(report_list) + reporter.process( +- reports.booth_config_saved(node.label, response["saved"]) ++ reports.booth_config_accepted_by_node(node.label, response["saved"]) + ) + except NodeCommunicationException as e: + raise LibraryError(node_communicator_exception_to_report_item(e)) +diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py +index a9a40ce..eb1885c 100644 +--- a/pcs/lib/booth/test/test_config_exchange.py ++++ b/pcs/lib/booth/test/test_config_exchange.py +@@ -17,47 +17,35 @@ class FromExchangeFormatTest(TestCase): + config_structure.ConfigItem("site", "2.2.2.2"), + config_structure.ConfigItem("arbitrator", "3.3.3.3"), + config_structure.ConfigItem("ticket", "TA"), +- config_structure.ConfigItem("ticket", "TB"), ++ config_structure.ConfigItem("ticket", "TB", [ ++ config_structure.ConfigItem("expire", "10") ++ ]), + ], +- config_exchange.from_exchange_format( +- { +- "sites": ["1.1.1.1", "2.2.2.2"], +- "arbitrators": ["3.3.3.3"], +- "tickets": ["TA", "TB"], +- "authfile": "/path/to/auth.file", +- }, +- ) ++ config_exchange.from_exchange_format([ ++ {"key": "authfile","value": "/path/to/auth.file","details": []}, ++ {"key": "site", "value": "1.1.1.1", "details": []}, ++ {"key": "site", "value": "2.2.2.2", "details": []}, ++ {"key": "arbitrator", "value": "3.3.3.3", "details": []}, ++ {"key": "ticket", "value": "TA", "details": []}, ++ {"key": "ticket", "value": "TB", "details": [ ++ {"key": "expire", "value": "10", "details": []} ++ ]}, ++ ]) + ) + + + class GetExchenageFormatTest(TestCase): + def test_convert_parsed_config_to_exchange_format(self): + self.assertEqual( +- { +- "sites": ["1.1.1.1", "2.2.2.2"], +- "arbitrators": ["3.3.3.3"], +- "tickets": ["TA", "TB"], +- "authfile": 
"/path/to/auth.file", +- }, +- config_exchange.to_exchange_format([ +- config_structure.ConfigItem("site", "1.1.1.1"), +- config_structure.ConfigItem("site", "2.2.2.2"), +- config_structure.ConfigItem("arbitrator", "3.3.3.3"), +- config_structure.ConfigItem("authfile", "/path/to/auth.file"), +- config_structure.ConfigItem("ticket", "TA"), +- config_structure.ConfigItem("ticket", "TB", [ +- config_structure.ConfigItem("timeout", "10") +- ]), +- ]) +- ) +- +- def test_convert_parsed_config_to_exchange_format_without_authfile(self): +- self.assertEqual( +- { +- "sites": ["1.1.1.1", "2.2.2.2"], +- "arbitrators": ["3.3.3.3"], +- "tickets": ["TA", "TB"], +- }, ++ [ ++ {"key": "site", "value": "1.1.1.1", "details": []}, ++ {"key": "site", "value": "2.2.2.2", "details": []}, ++ {"key": "arbitrator", "value": "3.3.3.3", "details": []}, ++ {"key": "ticket", "value": "TA", "details": []}, ++ {"key": "ticket", "value": "TB", "details": [ ++ {"key": "timeout", "value": "10", "details": []} ++ ]}, ++ ], + config_exchange.to_exchange_format([ + config_structure.ConfigItem("site", "1.1.1.1"), + config_structure.ConfigItem("site", "2.2.2.2"), +diff --git a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py +index 2d4c3ea..8266cac 100644 +--- a/pcs/lib/booth/test/test_config_files.py ++++ b/pcs/lib/booth/test/test_config_files.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from os.path import join ++import os.path + from unittest import TestCase + + from pcs.common import report_codes, env_file_role_codes as file_roles +@@ -21,20 +21,38 @@ def patch_config_files(target, *args, **kwargs): + "pcs.lib.booth.config_files.{0}".format(target), *args, **kwargs + ) + ++@mock.patch("os.path.isdir") + @mock.patch("os.listdir") + @mock.patch("os.path.isfile") + class GetAllConfigsFileNamesTest(TestCase): +- def test_success(self, mock_is_file, mock_listdir): ++ def test_booth_config_dir_is_no_dir( ++ self, mock_is_file, mock_listdir, 
mock_isdir ++ ): ++ mock_isdir.return_value = False ++ self.assertEqual([], config_files.get_all_configs_file_names()) ++ mock_isdir.assert_called_once_with(BOOTH_CONFIG_DIR) ++ self.assertEqual(0, mock_is_file.call_count) ++ self.assertEqual(0, mock_listdir.call_count) ++ ++ def test_success(self, mock_is_file, mock_listdir, mock_isdir): + def mock_is_file_fn(file_name): +- if file_name in ["dir.cong", "dir"]: ++ if file_name in [ ++ os.path.join(BOOTH_CONFIG_DIR, name) ++ for name in ("dir.cong", "dir") ++ ]: + return False + elif file_name in [ +- "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf" ++ os.path.join(BOOTH_CONFIG_DIR, name) ++ for name in ( ++ "name1", "name2.conf", "name.conf.conf", ".conf", ++ "name3.conf" ++ ) + ]: + return True + else: + raise AssertionError("unexpected input") + ++ mock_isdir.return_value = True + mock_is_file.side_effect = mock_is_file_fn + mock_listdir.return_value = [ + "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf", +@@ -59,7 +77,7 @@ class ReadConfigTest(TestCase): + + self.assertEqual( + [ +- mock.call(join(BOOTH_CONFIG_DIR, "my-file.conf"), "r"), ++ mock.call(os.path.join(BOOTH_CONFIG_DIR, "my-file.conf"), "r"), + mock.call().__enter__(), + mock.call().read(), + mock.call().__exit__(None, None, None) +@@ -193,7 +211,7 @@ class ReadAuthfileTest(TestCase): + self.maxDiff = None + + def test_success(self): +- path = join(BOOTH_CONFIG_DIR, "file.key") ++ path = os.path.join(BOOTH_CONFIG_DIR, "file.key") + mock_open = mock.mock_open(read_data="key") + + with patch_config_files("open", mock_open, create=True): +@@ -248,7 +266,7 @@ class ReadAuthfileTest(TestCase): + + @patch_config_files("format_environment_error", return_value="reason") + def test_read_failure(self, _): +- path = join(BOOTH_CONFIG_DIR, "file.key") ++ path = os.path.join(BOOTH_CONFIG_DIR, "file.key") + mock_open = mock.mock_open() + mock_open().read.side_effect = EnvironmentError() + +diff --git 
a/pcs/lib/booth/test/test_config_parser.py b/pcs/lib/booth/test/test_config_parser.py +index 684fc79..c04f451 100644 +--- a/pcs/lib/booth/test/test_config_parser.py ++++ b/pcs/lib/booth/test/test_config_parser.py +@@ -24,6 +24,7 @@ class BuildTest(TestCase): + 'ticket = "TA"', + 'ticket = "TB"', + " timeout = 10", ++ "", #newline at the end + ]), + config_parser.build([ + ConfigItem("authfile", "/path/to/auth.file"), +@@ -105,6 +106,7 @@ class ParseRawLinesTest(TestCase): + "arbitrator=3.3.3.3", + "syntactically_correct = nonsense", + "line-with = hash#literal", ++ "", + ])) + ) + +diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py +index 27faca5..1dd07cb 100644 +--- a/pcs/lib/booth/test/test_config_structure.py ++++ b/pcs/lib/booth/test/test_config_structure.py +@@ -47,6 +47,46 @@ class ValidateTicketUniqueTest(TestCase): + def test_do_not_raises_when_no_duplicated_ticket(self): + config_structure.validate_ticket_unique([], "A") + ++class ValidateTicketOptionsTest(TestCase): ++ def test_raises_on_invalid_options(self): ++ assert_raise_library_error( ++ lambda: config_structure.validate_ticket_options({ ++ "site": "a", ++ "port": "b", ++ "timeout": " ", ++ }), ++ ( ++ severities.ERROR, ++ report_codes.INVALID_OPTION, ++ { ++ "option_name": "site", ++ "option_type": "booth ticket", ++ "allowed": list(config_structure.TICKET_KEYS), ++ }, ++ ), ++ ( ++ severities.ERROR, ++ report_codes.INVALID_OPTION, ++ { ++ "option_name": "port", ++ "option_type": "booth ticket", ++ "allowed": list(config_structure.TICKET_KEYS), ++ }, ++ ), ++ ( ++ severities.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "timeout", ++ "option_value": " ", ++ "allowed_values": "no-empty", ++ }, ++ ), ++ ) ++ ++ def test_success_on_valid_options(self): ++ config_structure.validate_ticket_options({"timeout": "10"}) ++ + class TicketExistsTest(TestCase): + def test_returns_true_if_ticket_in_structure(self): + 
self.assertTrue(config_structure.ticket_exists( +@@ -183,10 +223,14 @@ class AddTicketTest(TestCase): + config_structure.ConfigItem("ticket", "some-ticket"), + ] + self.assertEqual( +- config_structure.add_ticket(configuration, "new-ticket"), ++ config_structure.add_ticket(configuration, "new-ticket", { ++ "timeout": "10", ++ }), + [ + config_structure.ConfigItem("ticket", "some-ticket"), +- config_structure.ConfigItem("ticket", "new-ticket"), ++ config_structure.ConfigItem("ticket", "new-ticket", [ ++ config_structure.ConfigItem("timeout", "10"), ++ ]), + ], + ) + +@@ -222,3 +266,21 @@ class SetAuthfileTest(TestCase): + "/path/to/auth.file" + ) + ) ++ ++class TakePeersTest(TestCase): ++ def test_returns_site_list_and_arbitrators_list(self): ++ self.assertEqual( ++ ( ++ ["1.1.1.1", "2.2.2.2", "3.3.3.3"], ++ ["4.4.4.4", "5.5.5.5"] ++ ), ++ config_structure.take_peers( ++ [ ++ config_structure.ConfigItem("site", "1.1.1.1"), ++ config_structure.ConfigItem("site", "2.2.2.2"), ++ config_structure.ConfigItem("site", "3.3.3.3"), ++ config_structure.ConfigItem("arbitrator", "4.4.4.4"), ++ config_structure.ConfigItem("arbitrator", "5.5.5.5"), ++ ], ++ ) ++ ) +diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py +index 440ddde..dd72c1e 100644 +--- a/pcs/lib/booth/test/test_resource.py ++++ b/pcs/lib/booth/test/test_resource.py +@@ -11,6 +11,7 @@ from lxml import etree + + import pcs.lib.booth.resource as booth_resource + from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.misc import get_test_resource as rc + + + def fixture_resources_with_booth(booth_config_file_path): +@@ -85,73 +86,24 @@ class FindBoothResourceElementsTest(TestCase): + ) + + class RemoveFromClusterTest(TestCase): +- def call(self, resources_section, remove_multiple=False): ++ def call(self, element_list): + mock_resource_remove = mock.Mock() +- num_of_removed_booth_resources = booth_resource.get_remover( +- mock_resource_remove +- )( +- resources_section, +- 
"/PATH/TO/CONF", +- remove_multiple, +- ) +- return ( +- mock_resource_remove, +- num_of_removed_booth_resources +- ) +- +- def fixture_resources_including_two_booths(self): +- resources_section = etree.fromstring('<resources/>') +- first = fixture_booth_element("first", "/PATH/TO/CONF") +- second = fixture_booth_element("second", "/PATH/TO/CONF") +- resources_section.append(first) +- resources_section.append(second) +- return resources_section +- +- def test_raises_when_booth_resource_not_found(self): +- self.assertRaises( +- booth_resource.BoothNotFoundInCib, +- lambda: self.call(etree.fromstring('<resources/>')), +- ) +- +- def test_raises_when_more_booth_resources_found(self): +- resources_section = self.fixture_resources_including_two_booths() +- self.assertRaises( +- booth_resource.BoothMultipleOccurenceFoundInCib, +- lambda: self.call(resources_section), +- ) +- +- def test_returns_number_of_removed_elements(self): +- resources_section = self.fixture_resources_including_two_booths() +- mock_resource_remove, num_of_removed_booth_resources = self.call( +- resources_section, +- remove_multiple=True +- ) +- self.assertEqual(num_of_removed_booth_resources, 2) +- self.assertEqual( +- mock_resource_remove.mock_calls, [ +- mock.call('first'), +- mock.call('second'), +- ] +- ) ++ booth_resource.get_remover(mock_resource_remove)(element_list) ++ return mock_resource_remove + + def test_remove_ip_when_is_only_booth_sibling_in_group(self): +- resources_section = etree.fromstring(''' +- <resources> +- <group> +- <primitive id="ip" type="IPaddr2"/> +- <primitive id="booth" type="booth-site"> +- <instance_attributes> +- <nvpair name="config" value="/PATH/TO/CONF"/> +- </instance_attributes> +- </primitive> +- </group> +- </resources> ++ group = etree.fromstring(''' ++ <group> ++ <primitive id="ip" type="IPaddr2"/> ++ <primitive id="booth" type="booth-site"> ++ <instance_attributes> ++ <nvpair name="config" value="/PATH/TO/CONF"/> ++ </instance_attributes> ++ </primitive> 
++ </group> + ''') + +- mock_resource_remove, _ = self.call( +- resources_section, +- remove_multiple=True +- ) ++ mock_resource_remove = self.call(group.getchildren()[1:]) + self.assertEqual( + mock_resource_remove.mock_calls, [ + mock.call('ip'), +@@ -159,6 +111,41 @@ class RemoveFromClusterTest(TestCase): + ] + ) + ++class CreateInClusterTest(TestCase): ++ def test_remove_ip_when_booth_resource_add_failed(self): ++ mock_resource_create = mock.Mock(side_effect=[None, SystemExit(1)]) ++ mock_resource_remove = mock.Mock() ++ mock_create_id = mock.Mock(side_effect=["ip_id","booth_id","group_id"]) ++ ip = "1.2.3.4" ++ booth_config_file_path = rc("/path/to/booth.conf") ++ ++ booth_resource.get_creator(mock_resource_create, mock_resource_remove)( ++ ip, ++ booth_config_file_path, ++ mock_create_id ++ ) ++ self.assertEqual(mock_resource_create.mock_calls, [ ++ mock.call( ++ clone_opts=[], ++ group=u'group_id', ++ meta_values=[], ++ op_values=[], ++ ra_id=u'ip_id', ++ ra_type=u'ocf:heartbeat:IPaddr2', ++ ra_values=[u'ip=1.2.3.4'], ++ ), ++ mock.call( ++ clone_opts=[], ++ group='group_id', ++ meta_values=[], ++ op_values=[], ++ ra_id='booth_id', ++ ra_type='ocf:pacemaker:booth-site', ++ ra_values=['config=/path/to/booth.conf'], ++ ) ++ ]) ++ mock_resource_remove.assert_called_once_with("ip_id") ++ + + class FindBindedIpTest(TestCase): + def fixture_resource_section(self, ip_element_list): +diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py +index 58500cc..9ba6e80 100644 +--- a/pcs/lib/booth/test/test_sync.py ++++ b/pcs/lib/booth/test/test_sync.py +@@ -74,7 +74,7 @@ class SetConfigOnNodeTest(TestCase): + self.mock_rep.report_item_list, + [( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": self.node.label, + "name": "cfg_name", +@@ -104,7 +104,7 @@ class SetConfigOnNodeTest(TestCase): + self.mock_rep.report_item_list, + [( + Severities.INFO, +- 
report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": self.node.label, + "name": "cfg_name", +@@ -175,8 +175,8 @@ class SyncConfigInCluster(TestCase): + self.mock_reporter.report_item_list, + [( + Severities.INFO, +- report_codes.BOOTH_DISTRIBUTING_CONFIG, +- {"name": "cfg_name"} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + )] + ) + +@@ -213,8 +213,8 @@ class SyncConfigInCluster(TestCase): + self.mock_reporter.report_item_list, + [( + Severities.INFO, +- report_codes.BOOTH_DISTRIBUTING_CONFIG, +- {"name": "cfg_name"} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + )] + ) + +@@ -252,8 +252,8 @@ class SyncConfigInCluster(TestCase): + self.mock_reporter.report_item_list, + [( + Severities.INFO, +- report_codes.BOOTH_DISTRIBUTING_CONFIG, +- {"name": "cfg_name"} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + )] + ) + +@@ -375,12 +375,12 @@ class SendAllConfigToNodeTest(TestCase): + [ + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, +- {"node": self.node.label} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + ), + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": self.node.label, + "name": "name1.conf, file1.key, name2.conf, file2.key", +@@ -489,8 +489,8 @@ class SendAllConfigToNodeTest(TestCase): + [ + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, +- {"node": self.node.label} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + ), + ( + Severities.ERROR, +@@ -593,8 +593,8 @@ class SendAllConfigToNodeTest(TestCase): + [ + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, +- {"node": self.node.label} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + ), + ( + Severities.WARNING, +@@ -616,7 +616,7 @@ class SendAllConfigToNodeTest(TestCase): + ), + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ 
report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": self.node.label, + "name": "name2.conf, file2.key", +@@ -652,7 +652,7 @@ class SendAllConfigToNodeTest(TestCase): + ), + ( + Severities.ERROR, +- report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR, + { + "node": self.node.label, + "name": "name1.conf", +@@ -661,7 +661,7 @@ class SendAllConfigToNodeTest(TestCase): + ), + ( + Severities.ERROR, +- report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR, + { + "node": self.node.label, + "name": "file1.key", +@@ -724,12 +724,12 @@ class SendAllConfigToNodeTest(TestCase): + [ + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, +- {"node": self.node.label} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + ), + ( + Severities.ERROR, +- report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR, + { + "node": self.node.label, + "name": "name1.conf", +@@ -738,7 +738,7 @@ class SendAllConfigToNodeTest(TestCase): + ), + ( + Severities.ERROR, +- report_codes.BOOTH_CONFIG_WRITE_ERROR, ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR, + { + "node": self.node.label, + "name": "file1.key", +@@ -1058,12 +1058,12 @@ class SendAllConfigToNodeTest(TestCase): + [ + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, +- {"node": self.node.label} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + ), + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": self.node.label, + "name": "name1.conf, name2.conf, file2.key", +@@ -1143,8 +1143,8 @@ class SendAllConfigToNodeTest(TestCase): + [ + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVING_ON_NODE, +- {"node": self.node.label} ++ report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED, ++ {} + ), + ( + Severities.WARNING, +@@ -1155,7 +1155,7 @@ class SendAllConfigToNodeTest(TestCase): + ), + ( + 
Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": self.node.label, + "name": "name2.conf, file2.key", +diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py +index 43ea9dd..7a3d348 100644 +--- a/pcs/lib/commands/booth.py ++++ b/pcs/lib/commands/booth.py +@@ -34,11 +34,10 @@ def config_setup(env, booth_configuration, overwrite_existing=False): + list arbitrator_list contains arbitrator adresses of multisite + """ + ++ config_content = config_exchange.from_exchange_format(booth_configuration) + config_structure.validate_peers( +- booth_configuration.get("sites", []), +- booth_configuration.get("arbitrators", []) ++ *config_structure.take_peers(config_content) + ) +- config_content = config_exchange.from_exchange_format(booth_configuration) + + env.booth.create_key(config_files.generate_key(), overwrite_existing) + config_content = config_structure.set_authfile( +@@ -99,21 +98,34 @@ def config_destroy(env, ignore_config_load_problems=False): + env.booth.remove_key() + env.booth.remove_config() + +-def config_show(env): ++ ++def config_text(env, name, node_name=None): + """ +- return configuration as tuple of sites list and arbitrators list ++ get configuration in raw format ++ string name -- name of booth instance whose config should be returned ++ string node_name -- get the config from specified node or local host if None + """ +- return config_exchange.to_exchange_format( +- parse(env.booth.get_config_content()) ++ if node_name is None: ++ # TODO add name support ++ return env.booth.get_config_content() ++ ++ remote_data = sync.pull_config_from_node( ++ env.node_communicator(), NodeAddresses(node_name), name + ) ++ try: ++ return remote_data["config"]["data"] ++ except KeyError: ++ raise LibraryError(reports.invalid_response_format(node_name)) + +-def config_ticket_add(env, ticket_name): ++ ++def config_ticket_add(env, ticket_name, options): + """ + add ticket to booth 
configuration + """ + booth_configuration = config_structure.add_ticket( + parse(env.booth.get_config_content()), +- ticket_name ++ ticket_name, ++ options, + ) + env.booth.push_config(build(booth_configuration)) + +@@ -127,7 +139,7 @@ def config_ticket_remove(env, ticket_name): + ) + env.booth.push_config(build(booth_configuration)) + +-def create_in_cluster(env, name, ip, resource_create): ++def create_in_cluster(env, name, ip, resource_create, resource_remove): + #TODO resource_create is provisional hack until resources are not moved to + #lib + resources_section = get_resources(env.get_cib()) +@@ -136,7 +148,7 @@ def create_in_cluster(env, name, ip, resource_create): + if resource.find_for_config(resources_section, booth_config_file_path): + raise LibraryError(booth_reports.booth_already_in_cib(name)) + +- resource.get_creator(resource_create)( ++ resource.get_creator(resource_create, resource_remove)( + ip, + booth_config_file_path, + create_id = partial( +@@ -146,25 +158,20 @@ def create_in_cluster(env, name, ip, resource_create): + ) + ) + +-def remove_from_cluster(env, name, resource_remove): ++def remove_from_cluster(env, name, resource_remove, allow_remove_multiple): + #TODO resource_remove is provisional hack until resources are not moved to + #lib +- try: +- num_of_removed_booth_resources = resource.get_remover(resource_remove)( +- get_resources(env.get_cib()), +- get_config_file_name(name), +- ) +- if num_of_removed_booth_resources > 1: +- env.report_processor.process( +- booth_reports.booth_multiple_times_in_cib( +- name, +- severity=ReportItemSeverity.WARNING, +- ) +- ) +- except resource.BoothNotFoundInCib: +- raise LibraryError(booth_reports.booth_not_exists_in_cib(name)) +- except resource.BoothMultipleOccurenceFoundInCib: +- raise LibraryError(booth_reports.booth_multiple_times_in_cib(name)) ++ resource.get_remover(resource_remove)( ++ _find_resource_elements_for_operation(env, name, allow_remove_multiple) ++ ) ++ ++def restart(env, name, 
resource_restart, allow_multiple): ++ #TODO resource_restart is provisional hack until resources are not moved to ++ #lib ++ for booth_element in _find_resource_elements_for_operation( ++ env, name, allow_multiple ++ ): ++ resource_restart([booth_element.attrib["id"]]) + + def ticket_operation(operation, env, name, ticket, site_ip): + if not site_ip: +@@ -314,7 +321,7 @@ def pull_config(env, node_name, name): + name -- string, name of booth instance of which config should be fetched + """ + env.report_processor.process( +- booth_reports.booth_fetching_config_from_node(node_name, name) ++ booth_reports.booth_fetching_config_from_node_started(node_name, name) + ) + output = sync.pull_config_from_node( + env.node_communicator(), NodeAddresses(node_name), name +@@ -335,7 +342,7 @@ def pull_config(env, node_name, name): + True + ) + env.report_processor.process( +- booth_reports.booth_config_saved(name_list=[name]) ++ booth_reports.booth_config_accepted_by_node(name_list=[name]) + ) + except KeyError: + raise LibraryError(reports.invalid_response_format(node_name)) +@@ -347,3 +354,24 @@ def get_status(env, name=None): + "ticket": status.get_tickets_status(env.cmd_runner(), name), + "peers": status.get_peers_status(env.cmd_runner(), name), + } ++ ++def _find_resource_elements_for_operation(env, name, allow_multiple): ++ booth_element_list = resource.find_for_config( ++ get_resources(env.get_cib()), ++ get_config_file_name(name), ++ ) ++ ++ if not booth_element_list: ++ raise LibraryError(booth_reports.booth_not_exists_in_cib(name)) ++ ++ if len(booth_element_list) > 1: ++ if not allow_multiple: ++ raise LibraryError(booth_reports.booth_multiple_times_in_cib(name)) ++ env.report_processor.process( ++ booth_reports.booth_multiple_times_in_cib( ++ name, ++ severity=ReportItemSeverity.WARNING, ++ ) ++ ) ++ ++ return booth_element_list +diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py +index 20bf06a..d2429b6 100644 +--- 
a/pcs/lib/commands/test/test_booth.py ++++ b/pcs/lib/commands/test/test_booth.py +@@ -19,7 +19,6 @@ from pcs.test.tools.assertions import ( + + from pcs import settings + from pcs.common import report_codes +-from pcs.lib.booth import resource as booth_resource + from pcs.lib.env import LibraryEnvironment + from pcs.lib.node import NodeAddresses + from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities +@@ -48,10 +47,10 @@ class ConfigSetupTest(TestCase): + env = mock.MagicMock() + commands.config_setup( + env, +- booth_configuration={ +- "sites": ["1.1.1.1"], +- "arbitrators": ["2.2.2.2"], +- }, ++ booth_configuration=[ ++ {"key": "site", "value": "1.1.1.1", "details": []}, ++ {"key": "arbitrator", "value": "2.2.2.2", "details": []}, ++ ], + ) + env.booth.create_config.assert_called_once_with( + "config content", +@@ -426,7 +425,7 @@ class PullConfigTest(TestCase): + ), + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": None, + "name": "name", +@@ -467,7 +466,7 @@ class PullConfigTest(TestCase): + ), + ( + Severities.INFO, +- report_codes.BOOTH_CONFIGS_SAVED_ON_NODE, ++ report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE, + { + "node": None, + "name": "name", +@@ -548,7 +547,8 @@ class CreateInClusterTest(TestCase): + def test_raises_when_is_created_already(self): + assert_raise_library_error( + lambda: commands.create_in_cluster( +- mock.MagicMock(), "somename", ip="1.2.3.4", resource_create=None ++ mock.MagicMock(), "somename", ip="1.2.3.4", ++ resource_create=None, resource_remove=None, + ), + ( + Severities.ERROR, +@@ -559,14 +559,14 @@ class CreateInClusterTest(TestCase): + ), + ) + +-class RemoveFromClusterTest(TestCase): +- @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock( +- side_effect=booth_resource.BoothNotFoundInCib() +- ))) ++class FindResourceElementsForOperationTest(TestCase): ++ @patch_commands("resource.find_for_config", 
mock.Mock(return_value=[])) + def test_raises_when_no_booth_resource_found(self): + assert_raise_library_error( +- lambda: commands.remove_from_cluster( +- mock.MagicMock(), "somename", resource_remove=None ++ lambda: commands._find_resource_elements_for_operation( ++ mock.MagicMock(), ++ "somename", ++ allow_multiple=False + ), + ( + Severities.ERROR, +@@ -577,13 +577,15 @@ class RemoveFromClusterTest(TestCase): + ), + ) + +- @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock( +- side_effect=booth_resource.BoothMultipleOccurenceFoundInCib() +- ))) ++ @patch_commands( ++ "resource.find_for_config", mock.Mock(return_value=["b_el1", "b_el2"]) ++ ) + def test_raises_when_multiple_booth_resource_found(self): + assert_raise_library_error( +- lambda: commands.remove_from_cluster( +- mock.MagicMock(), "somename", resource_remove=None ++ lambda: commands._find_resource_elements_for_operation( ++ mock.MagicMock(), ++ "somename", ++ allow_multiple=False + ), + ( + Severities.ERROR, +@@ -595,15 +597,15 @@ class RemoveFromClusterTest(TestCase): + ), + ) + +- @patch_commands("resource.get_remover", mock.Mock(return_value = mock.Mock( +- return_value=2 +- ))) ++ @patch_commands("get_resources", mock.Mock(return_value="resources")) ++ @patch_commands("resource.get_remover", mock.MagicMock()) ++ @patch_commands("resource.find_for_config", mock.Mock(return_value=[1, 2])) + def test_warn_when_multiple_booth_resources_removed(self): + report_processor=MockLibraryReportProcessor() +- commands.remove_from_cluster( ++ commands._find_resource_elements_for_operation( + mock.MagicMock(report_processor=report_processor), + "somename", +- resource_remove=None ++ allow_multiple=True, + ) + assert_report_item_list_equal(report_processor.report_item_list, [( + Severities.WARNING, +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index b3c4877..270ad2d 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -590,8 +590,8 @@ Add new ticket to the current configuration. 
+ ticket remove <ticket> + Remove the specified ticket from the current configuration. + .TP +-config +-Show booth configuration. ++config [<node>] ++Show booth configuration from the specified node or from the current node if node not specified. + .TP + create ip <address> + Make the cluster run booth service on the specified ip address as a cluster resource. Typically this is used to run booth site. +@@ -599,11 +599,14 @@ Make the cluster run booth service on the specified ip address as a cluster reso + remove + Remove booth resources created by the "pcs booth create" command. + .TP ++restart ++Restart booth resources created by the "pcs booth create" command. ++.TP + ticket grant <ticket> [<site address>] +-Grant the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. ++Grant the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator. + .TP + ticket revoke <ticket> [<site address>] +-Revoke the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. ++Revoke the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator. + .TP + status + Print current status of booth on the local node. 
+diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py +index 5ddc06d..3356e71 100644 +--- a/pcs/test/test_booth.py ++++ b/pcs/test/test_booth.py +@@ -76,10 +76,10 @@ class SetupTest(BoothMixin, unittest.TestCase): + self.assert_pcs_success( + "booth config", + stdout_full=console_report( ++ "authfile = {0}".format(BOOTH_KEY_FILE), + "site = 1.1.1.1", + "site = 2.2.2.2", + "arbitrator = 3.3.3.3", +- "authfile = {0}".format(BOOTH_KEY_FILE), + ) + ) + with open(BOOTH_KEY_FILE) as key_file: +@@ -187,13 +187,14 @@ class BoothTest(unittest.TestCase, BoothMixin): + + class AddTicketTest(BoothTest): + def test_success_add_ticket(self): +- self.assert_pcs_success("booth ticket add TicketA") ++ self.assert_pcs_success("booth ticket add TicketA expire=10") + self.assert_pcs_success("booth config", stdout_full=console_report( ++ "authfile = {0}".format(BOOTH_KEY_FILE), + "site = 1.1.1.1", + "site = 2.2.2.2", + "arbitrator = 3.3.3.3", +- "authfile = {0}".format(BOOTH_KEY_FILE), + 'ticket = "TicketA"', ++ " expire = 10", + )) + + def test_fail_on_bad_ticket_name(self): +@@ -211,22 +212,33 @@ class AddTicketTest(BoothTest): + "\n" + ) + ++ def test_fail_on_invalid_options(self): ++ self.assert_pcs_fail( ++ "booth ticket add TicketA site=a timeout=", console_report( ++ "Error: invalid booth ticket option 'site', allowed options" ++ " are: acquire-after, attr-prereq, before-acquire-handler," ++ " expire, renewal-freq, retries, timeout, weights" ++ , ++ "Error: '' is not a valid timeout value, use no-empty", ++ ) ++ ) ++ + class RemoveTicketTest(BoothTest): + def test_success_remove_ticket(self): + self.assert_pcs_success("booth ticket add TicketA") + self.assert_pcs_success("booth config", stdout_full=console_report( ++ "authfile = {0}".format(BOOTH_KEY_FILE), + "site = 1.1.1.1", + "site = 2.2.2.2", + "arbitrator = 3.3.3.3", +- "authfile = {0}".format(BOOTH_KEY_FILE), + 'ticket = "TicketA"', + )) + self.assert_pcs_success("booth ticket remove TicketA") + 
self.assert_pcs_success("booth config", stdout_full=console_report( ++ "authfile = {0}".format(BOOTH_KEY_FILE), + "site = 1.1.1.1", + "site = 2.2.2.2", + "arbitrator = 3.3.3.3", +- "authfile = {0}".format(BOOTH_KEY_FILE), + )) + + def test_fail_when_ticket_does_not_exist(self): +@@ -286,7 +298,6 @@ class RemoveTest(BoothTest): + " --force to override" + ]) + +- + def test_remove_added_booth_configuration(self): + self.assert_pcs_success("resource show", "NO resources configured\n") + self.assert_pcs_success("booth create ip 192.168.122.120") +@@ -301,8 +312,27 @@ class RemoveTest(BoothTest): + ]) + self.assert_pcs_success("resource show", "NO resources configured\n") + +- def test_fail_when_booth_is_not_currently_configured(self): +- pass ++ ++ def test_remove_multiple_booth_configuration(self): ++ self.assert_pcs_success("resource show", "NO resources configured\n") ++ self.assert_pcs_success("booth create ip 192.168.122.120") ++ self.assert_pcs_success( ++ "resource create some-id ocf:pacemaker:booth-site" ++ " config=/etc/booth/booth.conf" ++ ) ++ self.assert_pcs_success("resource show", [ ++ " Resource Group: booth-booth-group", ++ " booth-booth-ip (ocf::heartbeat:IPaddr2): Stopped", ++ " booth-booth-service (ocf::pacemaker:booth-site): Stopped", ++ " some-id (ocf::pacemaker:booth-site): Stopped", ++ ]) ++ self.assert_pcs_success("booth remove --force", [ ++ "Warning: found more than one booth instance 'booth' in cib", ++ "Deleting Resource - booth-booth-ip", ++ "Deleting Resource (and group) - booth-booth-service", ++ "Deleting Resource - some-id", ++ ]) ++ + + class TicketGrantTest(BoothTest): + def test_failed_when_implicit_site_but_not_correct_confgiuration_in_cib( +@@ -332,6 +362,7 @@ class ConfigTest(unittest.TestCase, BoothMixin): + def setUp(self): + shutil.copy(EMPTY_CIB, TEMP_CIB) + self.pcs_runner = PcsRunner(TEMP_CIB) ++ + def test_fail_when_config_file_do_not_exists(self): + ensure_booth_config_not_exists() + self.assert_pcs_fail( +@@ -340,3 
+371,33 @@ class ConfigTest(unittest.TestCase, BoothMixin): + BOOTH_CONFIG_FILE + ) + ) ++ ++ def test_too_much_args(self): ++ self.assert_pcs_fail( ++ "booth config nodename surplus", ++ stdout_start="\nUsage: pcs booth <command>\n config [" ++ ) ++ ++ def test_show_unsupported_values(self): ++ ensure_booth_config_not_exists() ++ self.assert_pcs_success( ++ "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" ++ ) ++ with open(BOOTH_CONFIG_FILE, "a") as config_file: ++ config_file.write("some = nonsense") ++ self.assert_pcs_success("booth ticket add TicketA") ++ with open(BOOTH_CONFIG_FILE, "a") as config_file: ++ config_file.write("another = nonsense") ++ ++ self.assert_pcs_success( ++ "booth config", ++ stdout_full="\n".join(( ++ "authfile = {0}".format(BOOTH_KEY_FILE), ++ "site = 1.1.1.1", ++ "site = 2.2.2.2", ++ "arbitrator = 3.3.3.3", ++ "some = nonsense", ++ 'ticket = "TicketA"', ++ "another = nonsense", ++ )) ++ ) +diff --git a/pcs/usage.py b/pcs/usage.py +index 78e340b..088dec9 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -118,6 +118,7 @@ def generate_completion_tree_from_usage(): + tree["pcsd"] = generate_tree(pcsd([],False)) + tree["node"] = generate_tree(node([], False)) + tree["alert"] = generate_tree(alert([], False)) ++ tree["booth"] = generate_tree(booth([], False)) + return tree + + def generate_tree(usage_txt): +@@ -1438,8 +1439,9 @@ Commands: + ticket remove <ticket> + Remove the specified ticket from the current configuration. + +- config +- Show booth configuration. ++ config [<node>] ++ Show booth configuration from the specified node or from the current ++ node if node not specified. + + create ip <address> + Make the cluster run booth service on the specified ip address as +@@ -1448,15 +1450,18 @@ Commands: + remove + Remove booth resources created by the "pcs booth create" command. + ++ restart ++ Restart booth resources created by the "pcs booth create" command. 
++ + ticket grant <ticket> [<site address>] + Grant the ticket for the site specified by address. Site address which + has been specified with 'pcs booth create' command is used if +- 'site address' is omitted. ++ 'site address' is omitted. Cannot be run on an arbitrator. + + ticket revoke <ticket> [<site address>] + Revoke the ticket for the site specified by address. Site address which + has been specified with 'pcs booth create' command is used if +- 'site address' is omitted. ++ 'site address' is omitted. Cannot be run on an arbitrator. + + status + Print current status of booth on the local node. +-- +1.8.3.1 + diff --git a/SOURCES/bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch b/SOURCES/bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch new file mode 100644 index 0000000..95e102c --- /dev/null +++ b/SOURCES/bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch @@ -0,0 +1,397 @@ +From 8707ba13053e172d148ec12820a4259ffa371000 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Wed, 14 Sep 2016 09:04:57 +0200 +Subject: [PATCH] squash bz1308514 Wider support for booth configura + +de5edd583d82 correct booth documentation (ticket grant/revoke) + +d98e6b04da8d make forceable uknown booth ticket option +--- + pcs/cli/booth/command.py | 6 +- + pcs/cli/booth/test/test_command.py | 3 +- + pcs/lib/booth/config_structure.py | 31 ++++++-- + pcs/lib/booth/test/test_config_structure.py | 107 ++++++++++++++++++++++++---- + pcs/lib/commands/booth.py | 7 +- + pcs/pcs.8 | 7 +- + pcs/test/test_booth.py | 17 +++++ + pcs/usage.py | 11 +-- + 8 files changed, 161 insertions(+), 28 deletions(-) + +diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py +index 0b71a01..72b2c73 100644 +--- a/pcs/cli/booth/command.py ++++ b/pcs/cli/booth/command.py +@@ -64,7 +64,11 @@ def config_ticket_add(lib, arg_list, modifiers): + """ + if not arg_list: + raise CmdLineInputError +- 
lib.booth.config_ticket_add(arg_list[0], prepare_options(arg_list[1:])) ++ lib.booth.config_ticket_add( ++ arg_list[0], ++ prepare_options(arg_list[1:]), ++ allow_unknown_options=modifiers["force"] ++ ) + + def config_ticket_remove(lib, arg_list, modifiers): + """ +diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py +index 44d7a12..8ba2c0e 100644 +--- a/pcs/cli/booth/test/test_command.py ++++ b/pcs/cli/booth/test/test_command.py +@@ -45,9 +45,10 @@ class ConfigTicketAddTest(TestCase): + command.config_ticket_add( + lib, + arg_list=["TICKET_A", "timeout=10"], +- modifiers={} ++ modifiers={"force": True} + ) + lib.booth.config_ticket_add.assert_called_once_with( + "TICKET_A", + {"timeout": "10"}, ++ allow_unknown_options=True + ) +diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py +index 8977b7a..09ff1a7 100644 +--- a/pcs/lib/booth/config_structure.py ++++ b/pcs/lib/booth/config_structure.py +@@ -9,7 +9,8 @@ import re + + import pcs.lib.reports as common_reports + from pcs.lib.booth import reports +-from pcs.lib.errors import LibraryError ++from pcs.lib.errors import LibraryError, ReportItemSeverity as severities ++from pcs.common import report_codes + from collections import namedtuple + + GLOBAL_KEYS = ( +@@ -83,10 +84,13 @@ def remove_ticket(booth_configuration, ticket_name): + if config_item.key != "ticket" or config_item.value != ticket_name + ] + +-def add_ticket(booth_configuration, ticket_name, options): ++def add_ticket( ++ report_processor, booth_configuration, ticket_name, options, ++ allow_unknown_options ++): + validate_ticket_name(ticket_name) + validate_ticket_unique(booth_configuration, ticket_name) +- validate_ticket_options(options) ++ validate_ticket_options(report_processor, options, allow_unknown_options) + return booth_configuration + [ + ConfigItem("ticket", ticket_name, [ + ConfigItem(key, value) for key, value in options.items() +@@ -101,7 +105,7 @@ def 
validate_ticket_unique(booth_configuration, ticket_name): + if ticket_exists(booth_configuration, ticket_name): + raise LibraryError(reports.booth_ticket_duplicate(ticket_name)) + +-def validate_ticket_options(options): ++def validate_ticket_options(report_processor, options, allow_unknown_options): + reports = [] + for key in sorted(options): + if key in GLOBAL_KEYS: +@@ -109,6 +113,22 @@ def validate_ticket_options(options): + common_reports.invalid_option(key, TICKET_KEYS, "booth ticket") + ) + ++ elif key not in TICKET_KEYS: ++ reports.append( ++ common_reports.invalid_option( ++ key, TICKET_KEYS, ++ "booth ticket", ++ severity=( ++ severities.WARNING if allow_unknown_options ++ else severities.ERROR ++ ), ++ forceable=( ++ None if allow_unknown_options ++ else report_codes.FORCE_OPTIONS ++ ), ++ ) ++ ) ++ + if not options[key].strip(): + reports.append(common_reports.invalid_option_value( + key, +@@ -116,8 +136,7 @@ def validate_ticket_options(options): + "no-empty", + )) + +- if reports: +- raise LibraryError(*reports) ++ report_processor.process_list(reports) + + def ticket_exists(booth_configuration, ticket_name): + return any( +diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py +index 5e7ac68..40618b2 100644 +--- a/pcs/lib/booth/test/test_config_structure.py ++++ b/pcs/lib/booth/test/test_config_structure.py +@@ -10,7 +10,11 @@ from pcs.test.tools.pcs_unittest import TestCase + from pcs.common import report_codes + from pcs.lib.booth import config_structure + from pcs.lib.errors import ReportItemSeverity as severities +-from pcs.test.tools.assertions import assert_raise_library_error ++from pcs.test.tools.assertions import ( ++ assert_raise_library_error, ++ assert_report_item_list_equal, ++) ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor + from pcs.test.tools.pcs_unittest import mock + + +@@ -49,12 +53,8 @@ class ValidateTicketUniqueTest(TestCase): + + class 
ValidateTicketOptionsTest(TestCase): + def test_raises_on_invalid_options(self): +- assert_raise_library_error( +- lambda: config_structure.validate_ticket_options({ +- "site": "a", +- "port": "b", +- "timeout": " ", +- }), ++ report_processor = MockLibraryReportProcessor() ++ expected_errors = [ + ( + severities.ERROR, + report_codes.INVALID_OPTION, +@@ -82,10 +82,81 @@ class ValidateTicketOptionsTest(TestCase): + "allowed_values": "no-empty", + }, + ), ++ ( ++ severities.ERROR, ++ report_codes.INVALID_OPTION, ++ { ++ "option_name": "unknown", ++ "option_type": "booth ticket", ++ "allowed": list(config_structure.TICKET_KEYS), ++ }, ++ report_codes.FORCE_OPTIONS ++ ), ++ ] ++ assert_raise_library_error( ++ lambda: config_structure.validate_ticket_options( ++ report_processor, ++ { ++ "site": "a", ++ "port": "b", ++ "timeout": " ", ++ "unknown": "c", ++ }, ++ allow_unknown_options=False, ++ ), ++ *expected_errors ++ ) ++ assert_report_item_list_equal( ++ report_processor.report_item_list, ++ expected_errors ++ ) ++ ++ def test_unknown_options_are_forceable(self): ++ report_processor = MockLibraryReportProcessor() ++ expected_errors = [ ++ ( ++ severities.ERROR, ++ report_codes.INVALID_OPTION, ++ { ++ "option_name": "site", ++ "option_type": "booth ticket", ++ "allowed": list(config_structure.TICKET_KEYS), ++ }, ++ ), ++ ] ++ assert_raise_library_error( ++ lambda: config_structure.validate_ticket_options( ++ report_processor, { ++ "site": "a", ++ "unknown": "c", ++ }, ++ allow_unknown_options=True, ++ ), ++ *expected_errors ++ ) ++ assert_report_item_list_equal( ++ report_processor.report_item_list, ++ expected_errors + [ ++ ( ++ severities.WARNING, ++ report_codes.INVALID_OPTION, ++ { ++ "option_name": "unknown", ++ "option_type": "booth ticket", ++ "allowed": list(config_structure.TICKET_KEYS), ++ }, ++ ), ++ ] + ) + + def test_success_on_valid_options(self): +- config_structure.validate_ticket_options({"timeout": "10"}) ++ report_processor = 
MockLibraryReportProcessor() ++ config_structure.validate_ticket_options( ++ report_processor, ++ {"timeout": "10"}, ++ allow_unknown_options=False, ++ ) ++ assert_report_item_list_equal(report_processor.report_item_list, []) + + class TicketExistsTest(TestCase): + def test_returns_true_if_ticket_in_structure(self): +@@ -214,18 +285,25 @@ class RemoveTicketTest(TestCase): + ) + + class AddTicketTest(TestCase): ++ @mock.patch("pcs.lib.booth.config_structure.validate_ticket_options") + @mock.patch("pcs.lib.booth.config_structure.validate_ticket_unique") + @mock.patch("pcs.lib.booth.config_structure.validate_ticket_name") + def test_successfully_add_ticket( +- self, mock_validate_name, mock_validate_uniq ++ self, mock_validate_name, mock_validate_uniq, mock_validate_options + ): + configuration = [ + config_structure.ConfigItem("ticket", "some-ticket"), + ] ++ + self.assertEqual( +- config_structure.add_ticket(configuration, "new-ticket", { +- "timeout": "10", +- }), ++ config_structure.add_ticket( ++ None, configuration, ++ "new-ticket", ++ { ++ "timeout": "10", ++ }, ++ allow_unknown_options=False, ++ ), + [ + config_structure.ConfigItem("ticket", "some-ticket"), + config_structure.ConfigItem("ticket", "new-ticket", [ +@@ -236,6 +314,11 @@ class AddTicketTest(TestCase): + + mock_validate_name.assert_called_once_with("new-ticket") + mock_validate_uniq.assert_called_once_with(configuration, "new-ticket") ++ mock_validate_options.assert_called_once_with( ++ None, ++ {"timeout": "10"}, ++ False ++ ) + + class SetAuthfileTest(TestCase): + def test_add_authfile(self): +diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py +index bea966c..705900a 100644 +--- a/pcs/lib/commands/booth.py ++++ b/pcs/lib/commands/booth.py +@@ -119,14 +119,19 @@ def config_text(env, name, node_name=None): + raise LibraryError(reports.invalid_response_format(node_name)) + + +-def config_ticket_add(env, ticket_name, options): ++def config_ticket_add(env, ticket_name, options, 
allow_unknown_options): + """ + add ticket to booth configuration ++ dict options contains options for ticket ++ bool allow_unknown_options decide if can be used options not listed in ++ ticket options nor global options + """ + booth_configuration = config_structure.add_ticket( ++ env.report_processor, + parse(env.booth.get_config_content()), + ticket_name, + options, ++ allow_unknown_options, + ) + env.booth.push_config(build(booth_configuration)) + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 270ad2d..61abe67 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -585,7 +585,8 @@ destroy + Remove booth configuration files. + .TP + ticket add <ticket> +-Add new ticket to the current configuration. ++Add new ticket to the current configuration. Ticket options are specified in booth manpage. ++ + .TP + ticket remove <ticket> + Remove the specified ticket from the current configuration. +@@ -603,10 +604,10 @@ restart + Restart booth resources created by the "pcs booth create" command. + .TP + ticket grant <ticket> [<site address>] +-Grant the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator. ++Grant the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Specifying site address is mandatory when running this command on an arbitrator. + .TP + ticket revoke <ticket> [<site address>] +-Revoke the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Cannot be run on an arbitrator. ++Revoke the ticket for the site specified by address. Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted. Specifying site address is mandatory when running this command on an arbitrator. 
+ .TP + status + Print current status of booth on the local node. +diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py +index 3356e71..c12391b 100644 +--- a/pcs/test/test_booth.py ++++ b/pcs/test/test_booth.py +@@ -223,6 +223,23 @@ class AddTicketTest(BoothTest): + ) + ) + ++ def test_forceable_fail_on_unknown_options(self): ++ msg = ( ++ "invalid booth ticket option 'unknown', allowed options" ++ " are: acquire-after, attr-prereq, before-acquire-handler," ++ " expire, renewal-freq, retries, timeout, weights" ++ ) ++ self.assert_pcs_fail( ++ "booth ticket add TicketA unknown=a", console_report( ++ "Error: "+msg+", use --force to override", ++ ) ++ ) ++ self.assert_pcs_success( ++ "booth ticket add TicketA unknown=a --force", ++ "Warning: {0}\n".format(msg), ++ ) ++ ++ + class RemoveTicketTest(BoothTest): + def test_success_remove_ticket(self): + self.assert_pcs_success("booth ticket add TicketA") +diff --git a/pcs/usage.py b/pcs/usage.py +index 088dec9..9d4617f 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1433,8 +1433,9 @@ Commands: + destroy + Remove booth configuration files. + +- ticket add <ticket> +- Add new ticket to the current configuration. ++ ticket add <ticket> [<name>=<value> ...] ++ Add new ticket to the current configuration. Ticket options are ++ specified in booth manpage. + + ticket remove <ticket> + Remove the specified ticket from the current configuration. +@@ -1456,12 +1457,14 @@ Commands: + ticket grant <ticket> [<site address>] + Grant the ticket for the site specified by address. Site address which + has been specified with 'pcs booth create' command is used if +- 'site address' is omitted. Cannot be run on an arbitrator. ++ 'site address' is omitted. Specifying site address is mandatory when ++ running this command on an arbitrator. + + ticket revoke <ticket> [<site address>] + Revoke the ticket for the site specified by address. 
Site address which + has been specified with 'pcs booth create' command is used if +- 'site address' is omitted. Cannot be run on an arbitrator. ++ 'site address' is omitted. Specifying site address is mandatory when ++ running this command on an arbitrator. + + status + Print current status of booth on the local node. +-- +1.8.3.1 + diff --git a/SOURCES/bz1315371-01-add-support-for-pacemaker-alerts.patch b/SOURCES/bz1315371-01-add-support-for-pacemaker-alerts.patch new file mode 100644 index 0000000..969a63b --- /dev/null +++ b/SOURCES/bz1315371-01-add-support-for-pacemaker-alerts.patch @@ -0,0 +1,4195 @@ +From ae514b04a95cadb3ac1819a9097dbee694f4596b Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Tue, 21 Jun 2016 15:23:07 +0200 +Subject: [PATCH] bz1315371-01-add support for pacemaker alerts + +--- + .pylintrc | 2 +- + pcs/alert.py | 237 +++++++++ + pcs/app.py | 6 + + pcs/cli/common/env.py | 1 + + pcs/cli/common/lib_wrapper.py | 28 +- + pcs/cli/common/middleware.py | 2 +- + pcs/common/report_codes.py | 6 + + pcs/config.py | 4 + + pcs/lib/cib/alert.py | 281 +++++++++++ + pcs/lib/cib/nvpair.py | 90 ++++ + pcs/lib/cib/test/test_alert.py | 931 +++++++++++++++++++++++++++++++++++ + pcs/lib/cib/test/test_nvpair.py | 206 ++++++++ + pcs/lib/cib/tools.py | 127 +++++ + pcs/lib/commands/alert.py | 169 +++++++ + pcs/lib/commands/test/test_alert.py | 639 ++++++++++++++++++++++++ + pcs/lib/commands/test/test_ticket.py | 2 +- + pcs/lib/env.py | 32 +- + pcs/lib/pacemaker.py | 17 +- + pcs/lib/reports.py | 91 ++++ + pcs/pcs.8 | 25 + + pcs/test/resources/cib-empty-2.5.xml | 10 + + pcs/test/test_alert.py | 363 ++++++++++++++ + pcs/test/test_lib_cib_tools.py | 181 ++++++- + pcs/test/test_lib_env.py | 140 +++++- + pcs/test/test_lib_pacemaker.py | 24 +- + pcs/test/test_resource.py | 6 + + pcs/test/test_stonith.py | 3 + + pcs/test/tools/color_text_runner.py | 10 + + pcs/usage.py | 43 ++ + pcs/utils.py | 9 +- + 30 files changed, 3649 insertions(+), 36 deletions(-) 
+ create mode 100644 pcs/alert.py + create mode 100644 pcs/lib/cib/alert.py + create mode 100644 pcs/lib/cib/nvpair.py + create mode 100644 pcs/lib/cib/test/test_alert.py + create mode 100644 pcs/lib/cib/test/test_nvpair.py + create mode 100644 pcs/lib/commands/alert.py + create mode 100644 pcs/lib/commands/test/test_alert.py + create mode 100644 pcs/test/resources/cib-empty-2.5.xml + create mode 100644 pcs/test/test_alert.py + +diff --git a/.pylintrc b/.pylintrc +index 661f3d2..e378e6a 100644 +--- a/.pylintrc ++++ b/.pylintrc +@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy + + [FORMAT] + # Maximum number of lines in a module +-max-module-lines=4571 ++max-module-lines=4577 + # Maximum number of characters on a single line. + max-line-length=1291 + +diff --git a/pcs/alert.py b/pcs/alert.py +new file mode 100644 +index 0000000..d3a6e28 +--- /dev/null ++++ b/pcs/alert.py +@@ -0,0 +1,237 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import sys ++ ++from pcs import ( ++ usage, ++ utils, ++) ++from pcs.cli.common.errors import CmdLineInputError ++from pcs.cli.common.parse_args import prepare_options ++from pcs.cli.common.console_report import indent ++from pcs.lib.errors import LibraryError ++ ++ ++def alert_cmd(*args): ++ argv = args[1] ++ if not argv: ++ sub_cmd = "config" ++ else: ++ sub_cmd = argv.pop(0) ++ try: ++ if sub_cmd == "help": ++ usage.alert(argv) ++ elif sub_cmd == "create": ++ alert_add(*args) ++ elif sub_cmd == "update": ++ alert_update(*args) ++ elif sub_cmd == "remove": ++ alert_remove(*args) ++ elif sub_cmd == "config" or sub_cmd == "show": ++ print_alert_config(*args) ++ elif sub_cmd == "recipient": ++ recipient_cmd(*args) ++ else: ++ raise CmdLineInputError() ++ except LibraryError as e: ++ utils.process_library_reports(e.args) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "alert", sub_cmd) ++ ++ ++def recipient_cmd(*args): ++ argv = args[1] ++ ++ if not 
argv: ++ usage.alert(["recipient"]) ++ sys.exit(1) ++ ++ sub_cmd = argv.pop(0) ++ try: ++ if sub_cmd == "help": ++ usage.alert(["recipient"]) ++ elif sub_cmd == "add": ++ recipient_add(*args) ++ elif sub_cmd == "update": ++ recipient_update(*args) ++ elif sub_cmd == "remove": ++ recipient_remove(*args) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror( ++ e, "alert", "recipient {0}".format(sub_cmd) ++ ) ++ ++ ++def parse_cmd_sections(arg_list, section_list): ++ output = dict([(section, []) for section in section_list + ["main"]]) ++ cur_section = "main" ++ for arg in arg_list: ++ if arg in section_list: ++ cur_section = arg ++ continue ++ output[cur_section].append(arg) ++ ++ return output ++ ++ ++def ensure_only_allowed_options(parameter_dict, allowed_list): ++ for arg, value in parameter_dict.items(): ++ if arg not in allowed_list: ++ raise CmdLineInputError( ++ "Unexpected parameter '{0}={1}'".format(arg, value) ++ ) ++ ++ ++def alert_add(lib, argv, modifiers): ++ if not argv: ++ raise CmdLineInputError() ++ ++ sections = parse_cmd_sections(argv, ["options", "meta"]) ++ main_args = prepare_options(sections["main"]) ++ ensure_only_allowed_options(main_args, ["id", "description", "path"]) ++ ++ lib.alert.create_alert( ++ main_args.get("id", None), ++ main_args.get("path", None), ++ prepare_options(sections["options"]), ++ prepare_options(sections["meta"]), ++ main_args.get("description", None) ++ ) ++ ++ ++def alert_update(lib, argv, modifiers): ++ if not argv: ++ raise CmdLineInputError() ++ ++ alert_id = argv[0] ++ ++ sections = parse_cmd_sections(argv[1:], ["options", "meta"]) ++ main_args = prepare_options(sections["main"]) ++ ensure_only_allowed_options(main_args, ["description", "path"]) ++ ++ lib.alert.update_alert( ++ alert_id, ++ main_args.get("path", None), ++ prepare_options(sections["options"]), ++ prepare_options(sections["meta"]), ++ main_args.get("description", None) ++ ) ++ ++ ++def alert_remove(lib, argv, modifiers): ++ if 
len(argv) != 1: ++ raise CmdLineInputError() ++ ++ lib.alert.remove_alert(argv[0]) ++ ++ ++def recipient_add(lib, argv, modifiers): ++ if len(argv) < 2: ++ raise CmdLineInputError() ++ ++ alert_id = argv[0] ++ recipient_value = argv[1] ++ ++ sections = parse_cmd_sections(argv[2:], ["options", "meta"]) ++ main_args = prepare_options(sections["main"]) ++ ensure_only_allowed_options(main_args, ["description"]) ++ ++ lib.alert.add_recipient( ++ alert_id, ++ recipient_value, ++ prepare_options(sections["options"]), ++ prepare_options(sections["meta"]), ++ main_args.get("description", None) ++ ) ++ ++ ++def recipient_update(lib, argv, modifiers): ++ if len(argv) < 2: ++ raise CmdLineInputError() ++ ++ alert_id = argv[0] ++ recipient_value = argv[1] ++ ++ sections = parse_cmd_sections(argv[2:], ["options", "meta"]) ++ main_args = prepare_options(sections["main"]) ++ ensure_only_allowed_options(main_args, ["description"]) ++ ++ lib.alert.update_recipient( ++ alert_id, ++ recipient_value, ++ prepare_options(sections["options"]), ++ prepare_options(sections["meta"]), ++ main_args.get("description", None) ++ ) ++ ++ ++def recipient_remove(lib, argv, modifiers): ++ if len(argv) != 2: ++ raise CmdLineInputError() ++ ++ lib.alert.remove_recipient(argv[0], argv[1]) ++ ++ ++def _nvset_to_str(nvset_obj): ++ output = [] ++ for nvpair_obj in nvset_obj: ++ output.append("{key}={value}".format( ++ key=nvpair_obj["name"], value=nvpair_obj["value"] ++ )) ++ return " ".join(output) ++ ++ ++def __description_attributes_to_str(obj): ++ output = [] ++ if obj.get("description"): ++ output.append("Description: {desc}".format(desc=obj["description"])) ++ if obj.get("instance_attributes"): ++ output.append("Options: {attributes}".format( ++ attributes=_nvset_to_str(obj["instance_attributes"]) ++ )) ++ if obj.get("meta_attributes"): ++ output.append("Meta options: {attributes}".format( ++ attributes=_nvset_to_str(obj["meta_attributes"]) ++ )) ++ return output ++ ++ ++def _alert_to_str(alert): ++ 
content = [] ++ content.extend(__description_attributes_to_str(alert)) ++ ++ recipients = [] ++ for recipient in alert.get("recipient_list", []): ++ recipients.extend( _recipient_to_str(recipient)) ++ ++ if recipients: ++ content.append("Recipients:") ++ content.extend(indent(recipients, 1)) ++ ++ return ["Alert: {alert_id} (path={path})".format( ++ alert_id=alert["id"], path=alert["path"] ++ )] + indent(content, 1) ++ ++ ++def _recipient_to_str(recipient): ++ return ["Recipient: {value}".format(value=recipient["value"])] + indent( ++ __description_attributes_to_str(recipient), 1 ++ ) ++ ++ ++def print_alert_config(lib, argv, modifiers): ++ if argv: ++ raise CmdLineInputError() ++ ++ print("Alerts:") ++ alert_list = lib.alert.get_all_alerts() ++ if alert_list: ++ for alert in alert_list: ++ print("\n".join(indent(_alert_to_str(alert), 1))) ++ else: ++ print(" No alerts defined") +diff --git a/pcs/app.py b/pcs/app.py +index 3c4865f..3758ee4 100644 +--- a/pcs/app.py ++++ b/pcs/app.py +@@ -27,6 +27,7 @@ from pcs import ( + stonith, + usage, + utils, ++ alert, + ) + + from pcs.cli.common import completion +@@ -193,6 +194,11 @@ def main(argv=None): + argv, + utils.get_modificators() + ), ++ "alert": lambda args: alert.alert_cmd( ++ utils.get_library_wrapper(), ++ args, ++ utils.get_modificators() ++ ), + } + if command not in cmd_map: + usage.main() +diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py +index f407981..2ba4f70 100644 +--- a/pcs/cli/common/env.py ++++ b/pcs/cli/common/env.py +@@ -8,6 +8,7 @@ from __future__ import ( + class Env(object): + def __init__(self): + self.cib_data = None ++ self.cib_upgraded = False + self.user = None + self.groups = None + self.corosync_conf_data = None +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 909b435..2ba5602 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -19,6 +19,7 @@ from pcs.lib.commands import ( + quorum, + qdevice, + sbd, ++ alert, + ) 
+ from pcs.cli.common.reports import ( + LibraryReportProcessorToConsole as LibraryReportProcessorToConsole, +@@ -42,6 +43,14 @@ def cli_env_to_lib_env(cli_env): + cli_env.auth_tokens_getter, + ) + ++def lib_env_to_cli_env(lib_env, cli_env): ++ if not lib_env.is_cib_live: ++ cli_env.cib_data = lib_env._get_cib_xml() ++ cli_env.cib_upgraded = lib_env.cib_upgraded ++ if not lib_env.is_corosync_conf_live: ++ cli_env.corosync_conf_data = lib_env.get_corosync_conf_data() ++ return cli_env ++ + def bind(cli_env, run_with_middleware, run_library_command): + def run(cli_env, *args, **kwargs): + lib_env = cli_env_to_lib_env(cli_env) +@@ -50,10 +59,7 @@ def bind(cli_env, run_with_middleware, run_library_command): + + #midlewares needs finish its work and they see only cli_env + #so we need reflect some changes to cli_env +- if not lib_env.is_cib_live: +- cli_env.cib_data = lib_env.get_cib_xml() +- if not lib_env.is_corosync_conf_live: +- cli_env.corosync_conf_data = lib_env.get_corosync_conf_data() ++ lib_env_to_cli_env(lib_env, cli_env) + + return lib_call_result + return partial(run_with_middleware, run, cli_env) +@@ -140,6 +146,20 @@ def load_module(env, middleware_factory, name): + "get_local_sbd_config": sbd.get_local_sbd_config, + } + ) ++ if name == "alert": ++ return bind_all( ++ env, ++ middleware.build(middleware_factory.cib), ++ { ++ "create_alert": alert.create_alert, ++ "update_alert": alert.update_alert, ++ "remove_alert": alert.remove_alert, ++ "add_recipient": alert.add_recipient, ++ "update_recipient": alert.update_recipient, ++ "remove_recipient": alert.remove_recipient, ++ "get_all_alerts": alert.get_all_alerts, ++ } ++ ) + + raise Exception("No library part '{0}'".format(name)) + +diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py +index 16618e1..e53e138 100644 +--- a/pcs/cli/common/middleware.py ++++ b/pcs/cli/common/middleware.py +@@ -34,7 +34,7 @@ def cib(use_local_cib, load_cib_content, write_cib): + result_of_next = 
next_in_line(env, *args, **kwargs) + + if use_local_cib: +- write_cib(env.cib_data) ++ write_cib(env.cib_data, env.cib_upgraded) + + return result_of_next + return apply +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index 927df35..bda982a 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -20,11 +20,17 @@ SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES" + AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR" + AGENT_NOT_FOUND = "AGENT_NOT_FOUND" + BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT' ++CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND" ++CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS" ++CIB_ALERT_RECIPIENT_NOT_FOUND = "CIB_ALERT_RECIPIENT_NOT_FOUND" + CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION" + CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT" + CIB_LOAD_ERROR = "CIB_LOAD_ERROR" + CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING" + CIB_PUSH_ERROR = "CIB_PUSH_ERROR" ++CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED" ++CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION" ++CIB_UPGRADE_SUCCESSFUL = "CIB_UPGRADE_SUCCESSFUL" + CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES" + CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS' + CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED' +diff --git a/pcs/config.py b/pcs/config.py +index 51de822..4659c5b 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -38,6 +38,7 @@ from pcs import ( + stonith, + usage, + utils, ++ alert, + ) + from pcs.lib.errors import LibraryError + from pcs.lib.commands import quorum as lib_quorum +@@ -123,6 +124,9 @@ def config_show_cib(): + ticket_command.show(lib, [], modificators) + + print() ++ alert.print_alert_config(lib, [], modificators) ++ ++ print() + del utils.pcs_options["--all"] + print("Resources Defaults:") + resource.show_defaults("rsc_defaults", indent=" ") +diff --git 
a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py +new file mode 100644 +index 0000000..6b72996 +--- /dev/null ++++ b/pcs/lib/cib/alert.py +@@ -0,0 +1,281 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from lxml import etree ++ ++from pcs.lib import reports ++from pcs.lib.errors import LibraryError ++from pcs.lib.cib.nvpair import update_nvset, get_nvset ++from pcs.lib.cib.tools import ( ++ check_new_id_applicable, ++ get_sub_element, ++ find_unique_id, ++ get_alerts, ++) ++ ++ ++def update_instance_attributes(tree, element, attribute_dict): ++ """ ++ Updates instance attributes of element. Returns updated instance ++ attributes element. ++ ++ tree -- cib etree node ++ element -- parent element of instance attributes ++ attribute_dict -- dictionary of nvpairs ++ """ ++ return update_nvset("instance_attributes", tree, element, attribute_dict) ++ ++ ++def update_meta_attributes(tree, element, attribute_dict): ++ """ ++ Updates meta attributes of element. Returns updated meta attributes element. ++ ++ tree -- cib etree node ++ element -- parent element of meta attributes ++ attribute_dict -- dictionary of nvpairs ++ """ ++ return update_nvset("meta_attributes", tree, element, attribute_dict) ++ ++ ++def _update_optional_attribute(element, attribute, value): ++ """ ++ Update optional attribute of element. Remove existing element if value ++ is empty. ++ ++ element -- parent element of specified attribute ++ attribute -- attribute to be updated ++ value -- new value ++ """ ++ if value is None: ++ return ++ if value: ++ element.set(attribute, value) ++ elif attribute in element.attrib: ++ del element.attrib[attribute] ++ ++ ++def get_alert_by_id(tree, alert_id): ++ """ ++ Returns alert element with specified id. ++ Raises AlertNotFound if alert with specified id doesn't exist. 
++ ++ tree -- cib etree node ++ alert_id -- id of alert ++ """ ++ alert = get_alerts(tree).find("./alert[@id='{0}']".format(alert_id)) ++ if alert is None: ++ raise LibraryError(reports.cib_alert_not_found(alert_id)) ++ return alert ++ ++ ++def get_recipient(alert, recipient_value): ++ """ ++ Returns recipient element with value recipient_value which belong to ++ specified alert. ++ Raises RecipientNotFound if recipient doesn't exist. ++ ++ alert -- parent element of required recipient ++ recipient_value -- value of recipient ++ """ ++ recipient = alert.find( ++ "./recipient[@value='{0}']".format(recipient_value) ++ ) ++ if recipient is None: ++ raise LibraryError(reports.cib_alert_recipient_not_found( ++ alert.get("id"), recipient_value ++ )) ++ return recipient ++ ++ ++def create_alert(tree, alert_id, path, description=""): ++ """ ++ Create new alert element. Returns newly created element. ++ Raises LibraryError if element with specified id already exists. ++ ++ tree -- cib etree node ++ alert_id -- id of new alert, it will be generated if it is None ++ path -- path to script ++ description -- description ++ """ ++ if alert_id: ++ check_new_id_applicable(tree, "alert-id", alert_id) ++ else: ++ alert_id = find_unique_id(tree, "alert") ++ ++ alert = etree.SubElement(get_alerts(tree), "alert", id=alert_id, path=path) ++ if description: ++ alert.set("description", description) ++ ++ return alert ++ ++ ++def update_alert(tree, alert_id, path, description=None): ++ """ ++ Update existing alert. Return updated alert element. ++ Raises AlertNotFound if alert with specified id doesn't exist. 
++ ++ tree -- cib etree node ++ alert_id -- id of alert to be updated ++ path -- new value of path, stay unchanged if None ++ description -- new value of description, stay unchanged if None, remove ++ if empty ++ """ ++ alert = get_alert_by_id(tree, alert_id) ++ if path: ++ alert.set("path", path) ++ _update_optional_attribute(alert, "description", description) ++ return alert ++ ++ ++def remove_alert(tree, alert_id): ++ """ ++ Remove alert with specified id. ++ Raises AlertNotFound if alert with specified id doesn't exist. ++ ++ tree -- cib etree node ++ alert_id -- id of alert which should be removed ++ """ ++ alert = get_alert_by_id(tree, alert_id) ++ alert.getparent().remove(alert) ++ ++ ++def add_recipient( ++ tree, ++ alert_id, ++ recipient_value, ++ description="" ++): ++ """ ++ Add recipient to alert with specified id. Returns added recipient element. ++ Raises AlertNotFound if alert with specified id doesn't exist. ++ Raises LibraryError if recipient already exists. ++ ++ tree -- cib etree node ++ alert_id -- id of alert which should be parent of new recipient ++ recipient_value -- value of recipient ++ description -- description of recipient ++ """ ++ alert = get_alert_by_id(tree, alert_id) ++ ++ recipient = alert.find( ++ "./recipient[@value='{0}']".format(recipient_value) ++ ) ++ if recipient is not None: ++ raise LibraryError(reports.cib_alert_recipient_already_exists( ++ alert_id, recipient_value ++ )) ++ ++ recipient = etree.SubElement( ++ alert, ++ "recipient", ++ id=find_unique_id(tree, "{0}-recipient".format(alert_id)), ++ value=recipient_value ++ ) ++ ++ if description: ++ recipient.set("description", description) ++ ++ return recipient ++ ++ ++def update_recipient(tree, alert_id, recipient_value, description): ++ """ ++ Update specified recipient. Returns updated recipient element. ++ Raises AlertNotFound if alert with specified id doesn't exist. ++ Raises RecipientNotFound if recipient doesn't exist. 
++ ++ tree -- cib etree node ++ alert_id -- id of alert, parent element of recipient ++ recipient_value -- recipient value ++ description -- description, if empty it will be removed, stay unchanged ++ if None ++ """ ++ recipient = get_recipient( ++ get_alert_by_id(tree, alert_id), recipient_value ++ ) ++ _update_optional_attribute(recipient, "description", description) ++ return recipient ++ ++ ++def remove_recipient(tree, alert_id, recipient_value): ++ """ ++ Remove specified recipient. ++ Raises AlertNotFound if alert with specified id doesn't exist. ++ Raises RecipientNotFound if recipient doesn't exist. ++ ++ tree -- cib etree node ++ alert_id -- id of alert, parent element of recipient ++ recipient_value -- recipient value ++ """ ++ recipient = get_recipient( ++ get_alert_by_id(tree, alert_id), recipient_value ++ ) ++ recipient.getparent().remove(recipient) ++ ++ ++def get_all_recipients(alert): ++ """ ++ Returns list of all recipient of specified alert. Format: ++ [ ++ { ++ "id": <id of recipient>, ++ "value": <value of recipient>, ++ "description": <recipient description>, ++ "instance_attributes": <list of nvpairs>, ++ "meta_attributes": <list of nvpairs> ++ } ++ ] ++ ++ alert -- parent element of recipients to return ++ """ ++ recipient_list = [] ++ for recipient in alert.findall("./recipient"): ++ recipient_list.append({ ++ "id": recipient.get("id"), ++ "value": recipient.get("value"), ++ "description": recipient.get("description", ""), ++ "instance_attributes": get_nvset( ++ get_sub_element(recipient, "instance_attributes") ++ ), ++ "meta_attributes": get_nvset( ++ get_sub_element(recipient, "meta_attributes") ++ ) ++ }) ++ return recipient_list ++ ++ ++def get_all_alerts(tree): ++ """ ++ Returns list of all alerts specified in tree. 
Format: ++ [ ++ { ++ "id": <id of alert>, ++ "path": <path to script>, ++ "description": <alert description>, ++ "instance_attributes": <list of nvpairs>, ++ "meta_attributes": <list of nvpairs>, ++ "recipients_list": <list of alert's recipients> ++ } ++ ] ++ ++ tree -- cib etree node ++ """ ++ alert_list = [] ++ for alert in get_alerts(tree).findall("./alert"): ++ alert_list.append({ ++ "id": alert.get("id"), ++ "path": alert.get("path"), ++ "description": alert.get("description", ""), ++ "instance_attributes": get_nvset( ++ get_sub_element(alert, "instance_attributes") ++ ), ++ "meta_attributes": get_nvset( ++ get_sub_element(alert, "meta_attributes") ++ ), ++ "recipient_list": get_all_recipients(alert) ++ }) ++ return alert_list +diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py +new file mode 100644 +index 0000000..d1a0cae +--- /dev/null ++++ b/pcs/lib/cib/nvpair.py +@@ -0,0 +1,90 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from lxml import etree ++ ++from pcs.lib.cib.tools import ( ++ get_sub_element, ++ find_unique_id, ++) ++ ++ ++def update_nvpair(tree, element, name, value): ++ """ ++ Update nvpair, create new if it doesn't yet exist or remove existing ++ nvpair if value is empty. Returns created/updated/removed nvpair element. 
++ ++ tree -- cib etree node ++ element -- element in which nvpair should be added/updated/removed ++ name -- name of nvpair ++ value -- value of nvpair ++ """ ++ nvpair = element.find("./nvpair[@name='{0}']".format(name)) ++ if nvpair is None: ++ if not value: ++ return None ++ nvpair_id = find_unique_id( ++ tree, "{0}-{1}".format(element.get("id"), name) ++ ) ++ nvpair = etree.SubElement( ++ element, "nvpair", id=nvpair_id, name=name, value=value ++ ) ++ else: ++ if value: ++ nvpair.set("value", value) ++ else: ++ # remove nvpair if value is empty ++ element.remove(nvpair) ++ return nvpair ++ ++ ++def update_nvset(tag_name, tree, element, attribute_dict): ++ """ ++ This method updates nvset specified by tag_name. If specified nvset ++ doesn't exist it will be created. Returns updated nvset element or None if ++ attribute_dict is empty. ++ ++ tag_name -- tag name of nvset element ++ tree -- cib etree node ++ element -- parent element of nvset ++ attribute_dict -- dictionary of nvpairs ++ """ ++ if not attribute_dict: ++ return None ++ ++ attributes = get_sub_element(element, tag_name, find_unique_id( ++ tree, "{0}-{1}".format(element.get("id"), tag_name) ++ ), 0) ++ ++ for name, value in sorted(attribute_dict.items()): ++ update_nvpair(tree, attributes, name, value) ++ ++ return attributes ++ ++ ++def get_nvset(nvset): ++ """ ++ Returns nvset element as list of nvpairs with format: ++ [ ++ { ++ "id": <id of nvpair>, ++ "name": <name of nvpair>, ++ "value": <value of nvpair> ++ }, ++ ... 
++ ] ++ ++ nvset -- nvset element ++ """ ++ nvpair_list = [] ++ for nvpair in nvset.findall("./nvpair"): ++ nvpair_list.append({ ++ "id": nvpair.get("id"), ++ "name": nvpair.get("name"), ++ "value": nvpair.get("value", "") ++ }) ++ return nvpair_list +diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py +new file mode 100644 +index 0000000..c387aaf +--- /dev/null ++++ b/pcs/lib/cib/test/test_alert.py +@@ -0,0 +1,931 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from lxml import etree ++ ++from pcs.common import report_codes ++from pcs.lib.cib import alert ++from pcs.lib.errors import ReportItemSeverity as severities ++from pcs.test.tools.assertions import( ++ assert_raise_library_error, ++ assert_xml_equal, ++) ++from pcs.test.tools.pcs_mock import mock ++ ++ ++@mock.patch("pcs.lib.cib.alert.update_nvset") ++class UpdateInstanceAttributesTest(TestCase): ++ def test_success(self, mock_update_nvset): ++ ret_val = etree.Element("nvset") ++ tree = etree.Element("tree") ++ element = etree.Element("element") ++ attributes = {"a": 1} ++ mock_update_nvset.return_value = ret_val ++ self.assertEqual( ++ alert.update_instance_attributes(tree, element, attributes), ++ ret_val ++ ) ++ mock_update_nvset.assert_called_once_with( ++ "instance_attributes", tree, element, attributes ++ ) ++ ++ ++@mock.patch("pcs.lib.cib.alert.update_nvset") ++class UpdateMetaAttributesTest(TestCase): ++ def test_success(self, mock_update_nvset): ++ ret_val = etree.Element("nvset") ++ tree = etree.Element("tree") ++ element = etree.Element("element") ++ attributes = {"a": 1} ++ mock_update_nvset.return_value = ret_val ++ self.assertEqual( ++ alert.update_meta_attributes(tree, element, attributes), ++ ret_val ++ ) ++ mock_update_nvset.assert_called_once_with( ++ "meta_attributes", tree, element, attributes ++ ) ++ ++ ++class UpdateOptionalAttributeTest(TestCase): ++ def 
test_add(self): ++ element = etree.Element("element") ++ alert._update_optional_attribute(element, "attr", "value1") ++ self.assertEqual(element.get("attr"), "value1") ++ ++ def test_update(self): ++ element = etree.Element("element", attr="value") ++ alert._update_optional_attribute(element, "attr", "value1") ++ self.assertEqual(element.get("attr"), "value1") ++ ++ def test_remove(self): ++ element = etree.Element("element", attr="value") ++ alert._update_optional_attribute(element, "attr", "") ++ self.assertTrue(element.get("attr") is None) ++ ++ ++class GetAlertByIdTest(TestCase): ++ def test_found(self): ++ xml = """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert-1"/> ++ <alert id="alert-2"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ assert_xml_equal( ++ '<alert id="alert-2"/>', ++ etree.tostring( ++ alert.get_alert_by_id(etree.XML(xml), "alert-2") ++ ).decode() ++ ) ++ ++ def test_different_place(self): ++ xml = """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert-1"/> ++ </alerts> ++ <alert id="alert-2"/> ++ </configuration> ++ </cib> ++ """ ++ assert_raise_library_error( ++ lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "alert-2"} ++ ) ++ ) ++ ++ def test_not_exist(self): ++ xml = """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert-1"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ assert_raise_library_error( ++ lambda: alert.get_alert_by_id(etree.XML(xml), "alert-2"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "alert-2"} ++ ) ++ ) ++ ++ ++class GetRecipientTest(TestCase): ++ def setUp(self): ++ self.xml = etree.XML( ++ """ ++ <alert id="alert-1"> ++ <recipient id="rec-1" value="value1"/> ++ <recipient id="rec-2" value="value2"/> ++ <not_recipient value="value3"/> ++ <recipients> ++ <recipient id="rec-4" value="value4"/> ++ </recipients> ++ </alert> ++ """ ++ ) ++ ++ def 
test_exist(self): ++ assert_xml_equal( ++ '<recipient id="rec-2" value="value2"/>', ++ etree.tostring(alert.get_recipient(self.xml, "value2")).decode() ++ ) ++ ++ def test_different_place(self): ++ assert_raise_library_error( ++ lambda: alert.get_recipient(self.xml, "value4"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ { ++ "alert": "alert-1", ++ "recipient": "value4" ++ } ++ ) ++ ) ++ ++ def test_not_recipient(self): ++ assert_raise_library_error( ++ lambda: alert.get_recipient(self.xml, "value3"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ { ++ "alert": "alert-1", ++ "recipient": "value3" ++ } ++ ) ++ ) ++ ++ ++class CreateAlertTest(TestCase): ++ def setUp(self): ++ self.tree = etree.XML( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ ) ++ ++ def test_no_alerts(self): ++ tree = etree.XML( ++ """ ++ <cib> ++ <configuration/> ++ </cib> ++ """ ++ ) ++ assert_xml_equal( ++ '<alert id="my-alert" path="/test/path"/>', ++ etree.tostring( ++ alert.create_alert(tree, "my-alert", "/test/path") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="my-alert" path="/test/path"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(tree).decode() ++ ) ++ ++ def test_alerts_exists(self): ++ assert_xml_equal( ++ '<alert id="my-alert" path="/test/path"/>', ++ etree.tostring( ++ alert.create_alert(self.tree, "my-alert", "/test/path") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert"/> ++ <alert id="my-alert" path="/test/path"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_alerts_exists_with_description(self): ++ assert_xml_equal( ++ '<alert id="my-alert" path="/test/path" description="nothing"/>', ++ etree.tostring(alert.create_alert( ++ self.tree, "my-alert", 
"/test/path", "nothing" ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert"/> ++ <alert ++ id="my-alert" ++ path="/test/path" ++ description="nothing" ++ /> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_invalid_id(self): ++ assert_raise_library_error( ++ lambda: alert.create_alert(self.tree, "1alert", "/path"), ++ ( ++ severities.ERROR, ++ report_codes.INVALID_ID, ++ { ++ "id": "1alert", ++ "id_description": "alert-id", ++ "invalid_character": "1", ++ "reason": "invalid first character" ++ } ++ ) ++ ) ++ ++ def test_id_exists(self): ++ assert_raise_library_error( ++ lambda: alert.create_alert(self.tree, "alert", "/path"), ++ ( ++ severities.ERROR, ++ report_codes.ID_ALREADY_EXISTS, ++ {"id": "alert"} ++ ) ++ ) ++ ++ def test_no_id(self): ++ assert_xml_equal( ++ '<alert id="alert-1" path="/test/path"/>', ++ etree.tostring( ++ alert.create_alert(self.tree, None, "/test/path") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert"/> ++ <alert id="alert-1" path="/test/path"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ ++class UpdateAlertTest(TestCase): ++ def setUp(self): ++ self.tree = etree.XML( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"/> ++ <alert id="alert1" path="/path1" description="nothing"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ ) ++ ++ def test_update_path(self): ++ assert_xml_equal( ++ '<alert id="alert" path="/test/path"/>', ++ etree.tostring( ++ alert.update_alert(self.tree, "alert", "/test/path") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/test/path"/> ++ <alert id="alert1" path="/path1" description="nothing"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ 
++ def test_remove_path(self): ++ assert_xml_equal( ++ '<alert id="alert" path="/path"/>', ++ etree.tostring(alert.update_alert(self.tree, "alert", "")).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"/> ++ <alert id="alert1" path="/path1" description="nothing"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_update_description(self): ++ assert_xml_equal( ++ '<alert id="alert" path="/path" description="desc"/>', ++ etree.tostring( ++ alert.update_alert(self.tree, "alert", None, "desc") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path" description="desc"/> ++ <alert id="alert1" path="/path1" description="nothing"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_remove_description(self): ++ assert_xml_equal( ++ '<alert id="alert1" path="/path1"/>', ++ etree.tostring( ++ alert.update_alert(self.tree, "alert1", None, "") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"/> ++ <alert id="alert1" path="/path1"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_id_not_exists(self): ++ assert_raise_library_error( ++ lambda: alert.update_alert(self.tree, "alert0", "/test"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "alert0"} ++ ) ++ ) ++ ++ ++class RemoveAlertTest(TestCase): ++ def setUp(self): ++ self.tree = etree.XML( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"/> ++ <alert id="alert-1" path="/next"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ ) ++ ++ def test_success(self): ++ alert.remove_alert(self.tree, "alert") ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert-1" 
path="/next"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_not_existing_id(self): ++ assert_raise_library_error( ++ lambda: alert.remove_alert(self.tree, "not-existing-id"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "not-existing-id"} ++ ) ++ ) ++ ++ ++class AddRecipientTest(TestCase): ++ def setUp(self): ++ self.tree = etree.XML( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ ) ++ ++ def test_success(self): ++ assert_xml_equal( ++ '<recipient id="alert-recipient-1" value="value1"/>', ++ etree.tostring( ++ alert.add_recipient(self.tree, "alert", "value1") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient id="alert-recipient-1" value="value1"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_recipient_exist(self): ++ assert_raise_library_error( ++ lambda: alert.add_recipient(self.tree, "alert", "test_val"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ { ++ "recipient": "test_val", ++ "alert": "alert" ++ } ++ ) ++ ) ++ ++ def test_alert_not_exist(self): ++ assert_raise_library_error( ++ lambda: alert.add_recipient(self.tree, "alert1", "test_val"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "alert1"} ++ ) ++ ) ++ ++ def test_with_description(self): ++ assert_xml_equal( ++ """ ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ """, ++ etree.tostring(alert.add_recipient( ++ self.tree, "alert", "value1", "desc" ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert 
id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ ++class UpdateRecipientTest(TestCase): ++ def setUp(self): ++ self.tree = etree.XML( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ ) ++ ++ def test_add_description(self): ++ assert_xml_equal( ++ """ ++ <recipient ++ id="alert-recipient" value="test_val" description="description" ++ /> ++ """, ++ etree.tostring(alert.update_recipient( ++ self.tree, "alert", "test_val", "description" ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient ++ id="alert-recipient" ++ value="test_val" ++ description="description" ++ /> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_update_description(self): ++ assert_xml_equal( ++ """ ++ <recipient ++ id="alert-recipient-1" value="value1" description="description" ++ /> ++ """, ++ etree.tostring(alert.update_recipient( ++ self.tree, "alert", "value1", "description" ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="description" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_remove_description(self): ++ 
assert_xml_equal( ++ """ ++ <recipient id="alert-recipient-1" value="value1"/> ++ """, ++ etree.tostring( ++ alert.update_recipient(self.tree, "alert", "value1", "") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient id="alert-recipient-1" value="value1"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_alert_not_exists(self): ++ assert_raise_library_error( ++ lambda: alert.update_recipient(self.tree, "alert1", "test_val", ""), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "alert1"} ++ ) ++ ) ++ ++ def test_recipient_not_exists(self): ++ assert_raise_library_error( ++ lambda: alert.update_recipient(self.tree, "alert", "unknown", ""), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ { ++ "alert": "alert", ++ "recipient": "unknown" ++ } ++ ) ++ ) ++ ++ ++class RemoveRecipientTest(TestCase): ++ def setUp(self): ++ self.tree = etree.XML( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient id="alert-recipient-2" value="val"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ ) ++ ++ def test_success(self): ++ alert.remove_recipient(self.tree, "alert", "val") ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ ++ def test_alert_not_exists(self): ++ assert_raise_library_error( ++ lambda: alert.remove_recipient(self.tree, "alert1", "test_val"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "alert1"} ++ ) ++ ) ++ ++ def test_recipient_not_exists(self): ++ 
assert_raise_library_error( ++ lambda: alert.remove_recipient(self.tree, "alert", "unknown"), ++ ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ { ++ "alert": "alert", ++ "recipient": "unknown" ++ } ++ ) ++ ) ++ ++ ++class GetAllRecipientsTest(TestCase): ++ def test_success(self): ++ alert_obj = etree.XML( ++ """ ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"> ++ <instance_attributes> ++ <nvpair ++ id="nvset-name1-value1" name="name1" value="value1" ++ /> ++ <nvpair ++ id="nvset-name2-value2" name="name2" value="value2" ++ /> ++ </instance_attributes> ++ <meta_attributes> ++ <nvpair id="nvset-name3" name="name3"/> ++ </meta_attributes> ++ </recipient> ++ <recipient ++ id="alert-recipient-1" value="value1" description="desc" ++ /> ++ </alert> ++ """ ++ ) ++ self.assertEqual( ++ [ ++ { ++ "id": "alert-recipient", ++ "value": "test_val", ++ "description": "", ++ "instance_attributes": [ ++ { ++ "id": "nvset-name1-value1", ++ "name": "name1", ++ "value": "value1" ++ }, ++ { ++ "id": "nvset-name2-value2", ++ "name": "name2", ++ "value": "value2" ++ } ++ ], ++ "meta_attributes": [ ++ { ++ "id": "nvset-name3", ++ "name": "name3", ++ "value": "" ++ } ++ ] ++ }, ++ { ++ "id": "alert-recipient-1", ++ "value": "value1", ++ "description": "desc", ++ "instance_attributes": [], ++ "meta_attributes": [] ++ } ++ ], ++ alert.get_all_recipients(alert_obj) ++ ) ++ ++ ++class GetAllAlertsTest(TestCase): ++ def test_success(self): ++ alerts = etree.XML( ++ """ ++<cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"> ++ <instance_attributes> ++ <nvpair ++ id="instance_attributes-name1-value1" ++ name="name1" ++ value="value1" ++ /> ++ <nvpair ++ id="instance_attributes-name2-value2" ++ name="name2" ++ value="value2" ++ /> ++ </instance_attributes> ++ <meta_attributes> ++ <nvpair id="meta_attributes-name3" name="name3"/> ++ </meta_attributes> ++ 
</recipient> ++ <recipient ++ id="alert-recipient-1" value="value1" description="desc" ++ /> ++ </alert> ++ <alert id="alert1" path="/test/path" description="desc"> ++ <instance_attributes> ++ <nvpair ++ id="alert1-name1-value1" name="name1" value="value1" ++ /> ++ <nvpair ++ id="alert1-name2-value2" name="name2" value="value2" ++ /> ++ </instance_attributes> ++ <meta_attributes> ++ <nvpair id="alert1-name3" name="name3"/> ++ </meta_attributes> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """ ++ ) ++ self.assertEqual( ++ [ ++ { ++ "id": "alert", ++ "path": "/path", ++ "description": "", ++ "instance_attributes": [], ++ "meta_attributes": [], ++ "recipient_list": [ ++ { ++ "id": "alert-recipient", ++ "value": "test_val", ++ "description": "", ++ "instance_attributes": [ ++ { ++ "id": "instance_attributes-name1-value1", ++ "name": "name1", ++ "value": "value1" ++ }, ++ { ++ "id": "instance_attributes-name2-value2", ++ "name": "name2", ++ "value": "value2" ++ } ++ ], ++ "meta_attributes": [ ++ { ++ "id": "meta_attributes-name3", ++ "name": "name3", ++ "value": "" ++ } ++ ] ++ }, ++ { ++ "id": "alert-recipient-1", ++ "value": "value1", ++ "description": "desc", ++ "instance_attributes": [], ++ "meta_attributes": [] ++ } ++ ] ++ }, ++ { ++ "id": "alert1", ++ "path": "/test/path", ++ "description": "desc", ++ "instance_attributes": [ ++ { ++ "id": "alert1-name1-value1", ++ "name": "name1", ++ "value": "value1" ++ }, ++ { ++ "id": "alert1-name2-value2", ++ "name": "name2", ++ "value": "value2" ++ } ++ ], ++ "meta_attributes": [ ++ { ++ "id": "alert1-name3", ++ "name": "name3", ++ "value": "" ++ } ++ ], ++ "recipient_list": [] ++ } ++ ], ++ alert.get_all_alerts(alerts) ++ ) +diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py +new file mode 100644 +index 0000000..6907f25 +--- /dev/null ++++ b/pcs/lib/cib/test/test_nvpair.py +@@ -0,0 +1,206 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ 
unicode_literals, ++) ++ ++from unittest import TestCase ++ ++from lxml import etree ++ ++from pcs.lib.cib import nvpair ++from pcs.test.tools.assertions import assert_xml_equal ++ ++ ++class UpdateNvpairTest(TestCase): ++ def setUp(self): ++ self.nvset = etree.Element("nvset", id="nvset") ++ etree.SubElement( ++ self.nvset, "nvpair", id="nvset-attr", name="attr", value="1" ++ ) ++ etree.SubElement( ++ self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2" ++ ) ++ etree.SubElement( ++ self.nvset, "notnvpair", id="nvset-test", name="test", value="0" ++ ) ++ ++ def test_update(self): ++ assert_xml_equal( ++ "<nvpair id='nvset-attr' name='attr' value='10'/>", ++ etree.tostring( ++ nvpair.update_nvpair(self.nvset, self.nvset, "attr", "10") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <nvset id="nvset"> ++ <nvpair id="nvset-attr" name="attr" value="10"/> ++ <nvpair id="nvset-attr2" name="attr2" value="2"/> ++ <notnvpair id="nvset-test" name="test" value="0"/> ++ </nvset> ++ """, ++ etree.tostring(self.nvset).decode() ++ ) ++ ++ def test_add(self): ++ assert_xml_equal( ++ "<nvpair id='nvset-test-1' name='test' value='0'/>", ++ etree.tostring( ++ nvpair.update_nvpair(self.nvset, self.nvset, "test", "0") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <nvset id="nvset"> ++ <nvpair id="nvset-attr" name="attr" value="1"/> ++ <nvpair id="nvset-attr2" name="attr2" value="2"/> ++ <notnvpair id="nvset-test" name="test" value="0"/> ++ <nvpair id="nvset-test-1" name="test" value="0"/> ++ </nvset> ++ """, ++ etree.tostring(self.nvset).decode() ++ ) ++ ++ def test_remove(self): ++ assert_xml_equal( ++ "<nvpair id='nvset-attr2' name='attr2' value='2'/>", ++ etree.tostring( ++ nvpair.update_nvpair(self.nvset, self.nvset, "attr2", "") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <nvset id="nvset"> ++ <nvpair id="nvset-attr" name="attr" value="1"/> ++ <notnvpair id="nvset-test" name="test" value="0"/> ++ </nvset> ++ """, ++ etree.tostring(self.nvset).decode() ++ ) 
++ ++ def test_remove_not_existing(self): ++ self.assertTrue( ++ nvpair.update_nvpair(self.nvset, self.nvset, "attr3", "") is None ++ ) ++ assert_xml_equal( ++ """ ++ <nvset id="nvset"> ++ <nvpair id="nvset-attr" name="attr" value="1"/> ++ <nvpair id="nvset-attr2" name="attr2" value="2"/> ++ <notnvpair id="nvset-test" name="test" value="0"/> ++ </nvset> ++ """, ++ etree.tostring(self.nvset).decode() ++ ) ++ ++ ++class UpdateNvsetTest(TestCase): ++ def setUp(self): ++ self.root = etree.Element("root", id="root") ++ self.nvset = etree.SubElement(self.root, "nvset", id="nvset") ++ etree.SubElement( ++ self.nvset, "nvpair", id="nvset-attr", name="attr", value="1" ++ ) ++ etree.SubElement( ++ self.nvset, "nvpair", id="nvset-attr2", name="attr2", value="2" ++ ) ++ etree.SubElement( ++ self.nvset, "notnvpair", id="nvset-test", name="test", value="0" ++ ) ++ ++ def test_None(self): ++ self.assertTrue( ++ nvpair.update_nvset("nvset", self.root, self.root, None) is None ++ ) ++ ++ def test_empty(self): ++ self.assertTrue( ++ nvpair.update_nvset("nvset", self.root, self.root, {}) is None ++ ) ++ ++ def test_existing(self): ++ self.assertEqual( ++ self.nvset, ++ nvpair.update_nvset("nvset", self.root, self.root, { ++ "attr": "10", ++ "new_one": "20", ++ "test": "0", ++ "attr2": "" ++ }) ++ ) ++ assert_xml_equal( ++ """ ++ <nvset id="nvset"> ++ <nvpair id="nvset-attr" name="attr" value="10"/> ++ <notnvpair id="nvset-test" name="test" value="0"/> ++ <nvpair id="nvset-new_one" name="new_one" value="20"/> ++ <nvpair id="nvset-test-1" name="test" value="0"/> ++ </nvset> ++ """, ++ etree.tostring(self.nvset).decode() ++ ) ++ ++ def test_new(self): ++ root = etree.Element("root", id="root") ++ assert_xml_equal( ++ """ ++ <nvset id="root-nvset"> ++ <nvpair id="root-nvset-attr" name="attr" value="10"/> ++ <nvpair id="root-nvset-new_one" name="new_one" value="20"/> ++ <nvpair id="root-nvset-test" name="test" value="0"/> ++ </nvset> ++ """, ++ etree.tostring(nvpair.update_nvset("nvset", 
root, root, { ++ "attr": "10", ++ "new_one": "20", ++ "test": "0", ++ "attr2": "" ++ })).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <root id="root"> ++ <nvset id="root-nvset"> ++ <nvpair id="root-nvset-attr" name="attr" value="10"/> ++ <nvpair id="root-nvset-new_one" name="new_one" value="20"/> ++ <nvpair id="root-nvset-test" name="test" value="0"/> ++ </nvset> ++ </root> ++ """, ++ etree.tostring(root).decode() ++ ) ++ ++ ++class GetNvsetTest(TestCase): ++ def test_success(self): ++ nvset = etree.XML( ++ """ ++ <nvset> ++ <nvpair id="nvset-name1" name="name1" value="value1"/> ++ <nvpair id="nvset-name2" name="name2" value="value2"/> ++ <nvpair id="nvset-name3" name="name3"/> ++ </nvset> ++ """ ++ ) ++ self.assertEqual( ++ [ ++ { ++ "id": "nvset-name1", ++ "name": "name1", ++ "value": "value1" ++ }, ++ { ++ "id": "nvset-name2", ++ "name": "name2", ++ "value": "value2" ++ }, ++ { ++ "id": "nvset-name3", ++ "name": "name3", ++ "value": "" ++ } ++ ], ++ nvpair.get_nvset(nvset) ++ ) +diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py +index dfe31fc..b59d50d 100644 +--- a/pcs/lib/cib/tools.py ++++ b/pcs/lib/cib/tools.py +@@ -5,8 +5,12 @@ from __future__ import ( + unicode_literals, + ) + ++import os ++import re ++import tempfile + from lxml import etree + ++from pcs import settings + from pcs.lib import reports + from pcs.lib.errors import LibraryError + from pcs.lib.pacemaker_values import validate_id +@@ -71,6 +75,15 @@ def get_acls(tree): + acls = etree.SubElement(get_configuration(tree), "acls") + return acls + ++ ++def get_alerts(tree): ++ """ ++ Return 'alerts' element from tree, create a new one if missing ++ tree -- cib etree node ++ """ ++ return get_sub_element(get_configuration(tree), "alerts") ++ ++ + def get_constraints(tree): + """ + Return 'constraint' element from tree +@@ -87,3 +100,117 @@ def find_parent(element, tag_names): + + def export_attributes(element): + return dict((key, value) for key, value in element.attrib.items()) ++ ++ ++def 
get_sub_element(element, sub_element_tag, new_id=None, new_index=None): ++ """ ++ Returns sub-element sub_element_tag of element. It will create new ++ element if such doesn't exist yet. Id of new element will be new_if if ++ it's not None. new_index specify where will be new element added, if None ++ it will be appended. ++ ++ element -- parent element ++ sub_element_tag -- tag of wanted element ++ new_id -- id of new element ++ new_index -- index for new element ++ """ ++ sub_element = element.find("./{0}".format(sub_element_tag)) ++ if sub_element is None: ++ sub_element = etree.Element(sub_element_tag) ++ if new_id: ++ sub_element.set("id", new_id) ++ if new_index is None: ++ element.append(sub_element) ++ else: ++ element.insert(new_index, sub_element) ++ return sub_element ++ ++ ++def get_pacemaker_version_by_which_cib_was_validated(cib): ++ """ ++ Return version of pacemaker which validated specified cib as tree. ++ Version is returned as tuple of integers: (<major>, <minor>, <revision>). ++ Raises LibraryError on any failure. ++ ++ cib -- cib etree ++ """ ++ version = cib.get("validate-with") ++ if version is None: ++ raise LibraryError(reports.cib_load_error_invalid_format()) ++ ++ regexp = re.compile( ++ r"pacemaker-(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?" ++ ) ++ match = regexp.match(version) ++ if not match: ++ raise LibraryError(reports.cib_load_error_invalid_format()) ++ return ( ++ int(match.group("major")), ++ int(match.group("minor")), ++ int(match.group("rev") or 0) ++ ) ++ ++ ++def upgrade_cib(cib, runner): ++ """ ++ Upgrade CIB to the latest schema of installed pacemaker. Returns upgraded ++ CIB as string. ++ Raises LibraryError on any failure. 
++ ++ cib -- cib etree ++ runner -- CommandRunner ++ """ ++ temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs") ++ temp_file.write(etree.tostring(cib).decode()) ++ temp_file.flush() ++ output, retval = runner.run( ++ [ ++ os.path.join(settings.pacemaker_binaries, "cibadmin"), ++ "--upgrade", ++ "--force" ++ ], ++ env_extend={"CIB_file": temp_file.name} ++ ) ++ ++ if retval != 0: ++ temp_file.close() ++ LibraryError(reports.cib_upgrade_failed(output)) ++ ++ try: ++ temp_file.seek(0) ++ return etree.fromstring(temp_file.read()) ++ except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e: ++ LibraryError(reports.cib_upgrade_failed(str(e))) ++ finally: ++ temp_file.close() ++ ++ ++def ensure_cib_version(runner, cib, version): ++ """ ++ This method ensures that specified cib is verified by pacemaker with ++ version 'version' or newer. If cib doesn't correspond to this version, ++ method will try to upgrade cib. ++ Returns cib which was verified by pacemaker version 'version' or later. ++ Raises LibraryError on any failure. 
++ ++ runner -- CommandRunner ++ cib -- cib tree ++ version -- tuple of integers (<major>, <minor>, <revision>) ++ """ ++ current_version = get_pacemaker_version_by_which_cib_was_validated( ++ cib ++ ) ++ if current_version >= version: ++ return None ++ ++ upgraded_cib = upgrade_cib(cib, runner) ++ current_version = get_pacemaker_version_by_which_cib_was_validated( ++ upgraded_cib ++ ) ++ ++ if current_version >= version: ++ return upgraded_cib ++ ++ raise LibraryError(reports.unable_to_upgrade_cib_to_required_version( ++ current_version, version ++ )) +diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py +new file mode 100644 +index 0000000..7371fbc +--- /dev/null ++++ b/pcs/lib/commands/alert.py +@@ -0,0 +1,169 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++from pcs.lib import reports ++from pcs.lib.cib import alert ++from pcs.lib.errors import LibraryError ++ ++ ++REQUIRED_CIB_VERSION = (2, 5, 0) ++ ++ ++def create_alert( ++ lib_env, ++ alert_id, ++ path, ++ instance_attribute_dict, ++ meta_attribute_dict, ++ description=None ++): ++ """ ++ Create new alert. ++ Raises LibraryError if path is not specified, or any other failure. 
++ ++ lib_env -- LibraryEnvironment ++ alert_id -- id of alert to be created, if None it will be generated ++ path -- path to script for alert ++ instance_attribute_dict -- dictionary of instance attributes ++ meta_attribute_dict -- dictionary of meta attributes ++ description -- alert description description ++ """ ++ if not path: ++ raise LibraryError(reports.required_option_is_missing("path")) ++ ++ cib = lib_env.get_cib(REQUIRED_CIB_VERSION) ++ ++ alert_el = alert.create_alert(cib, alert_id, path, description) ++ alert.update_instance_attributes(cib, alert_el, instance_attribute_dict) ++ alert.update_meta_attributes(cib, alert_el, meta_attribute_dict) ++ ++ lib_env.push_cib(cib) ++ ++ ++def update_alert( ++ lib_env, ++ alert_id, ++ path, ++ instance_attribute_dict, ++ meta_attribute_dict, ++ description=None ++): ++ """ ++ Update existing alert with specified id. ++ ++ lib_env -- LibraryEnvironment ++ alert_id -- id of alert to be updated ++ path -- new path, if None old value will stay unchanged ++ instance_attribute_dict -- dictionary of instance attributes to update ++ meta_attribute_dict -- dictionary of meta attributes to update ++ description -- new description, if empty string, old description will be ++ deleted, if None old value will stay unchanged ++ """ ++ cib = lib_env.get_cib(REQUIRED_CIB_VERSION) ++ ++ alert_el = alert.update_alert(cib, alert_id, path, description) ++ alert.update_instance_attributes(cib, alert_el, instance_attribute_dict) ++ alert.update_meta_attributes(cib, alert_el, meta_attribute_dict) ++ ++ lib_env.push_cib(cib) ++ ++ ++def remove_alert(lib_env, alert_id): ++ """ ++ Remove alert with specified id. 
++ ++ lib_env -- LibraryEnvironment ++ alert_id -- id of alert which should be removed ++ """ ++ cib = lib_env.get_cib(REQUIRED_CIB_VERSION) ++ alert.remove_alert(cib, alert_id) ++ lib_env.push_cib(cib) ++ ++ ++def add_recipient( ++ lib_env, ++ alert_id, ++ recipient_value, ++ instance_attribute_dict, ++ meta_attribute_dict, ++ description=None ++): ++ """ ++ Add new recipient to alert witch id alert_id. ++ ++ lib_env -- LibraryEnvironment ++ alert_id -- id of alert to which new recipient should be added ++ recipient_value -- value of new recipient ++ instance_attribute_dict -- dictionary of instance attributes to update ++ meta_attribute_dict -- dictionary of meta attributes to update ++ description -- recipient description ++ """ ++ if not recipient_value: ++ raise LibraryError( ++ reports.required_option_is_missing("value") ++ ) ++ ++ cib = lib_env.get_cib(REQUIRED_CIB_VERSION) ++ recipient = alert.add_recipient( ++ cib, alert_id, recipient_value, description ++ ) ++ alert.update_instance_attributes(cib, recipient, instance_attribute_dict) ++ alert.update_meta_attributes(cib, recipient, meta_attribute_dict) ++ ++ lib_env.push_cib(cib) ++ ++ ++def update_recipient( ++ lib_env, ++ alert_id, ++ recipient_value, ++ instance_attribute_dict, ++ meta_attribute_dict, ++ description=None ++): ++ """ ++ Update existing recipient. 
++ ++ lib_env -- LibraryEnvironment ++ alert_id -- id of alert to which recipient belong ++ recipient_value -- recipient to be updated ++ instance_attribute_dict -- dictionary of instance attributes to update ++ meta_attribute_dict -- dictionary of meta attributes to update ++ description -- new description, if empty string, old description will be ++ deleted, if None old value will stay unchanged ++ """ ++ cib = lib_env.get_cib(REQUIRED_CIB_VERSION) ++ recipient = alert.update_recipient( ++ cib, alert_id, recipient_value, description ++ ) ++ alert.update_instance_attributes(cib, recipient, instance_attribute_dict) ++ alert.update_meta_attributes(cib, recipient, meta_attribute_dict) ++ ++ lib_env.push_cib(cib) ++ ++ ++def remove_recipient(lib_env, alert_id, recipient_value): ++ """ ++ Remove existing recipient. ++ ++ lib_env -- LibraryEnvironment ++ alert_id -- id of alert to which recipient belong ++ recipient_value -- recipient to be removed ++ """ ++ cib = lib_env.get_cib(REQUIRED_CIB_VERSION) ++ alert.remove_recipient(cib, alert_id, recipient_value) ++ lib_env.push_cib(cib) ++ ++ ++def get_all_alerts(lib_env): ++ """ ++ Returns list of all alerts. See docs of pcs.lib.cib.alert.get_all_alerts for ++ description of data format. 
++ ++ lib_env -- LibraryEnvironment ++ """ ++ return alert.get_all_alerts(lib_env.get_cib()) +diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py +new file mode 100644 +index 0000000..34813df +--- /dev/null ++++ b/pcs/lib/commands/test/test_alert.py +@@ -0,0 +1,639 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import logging ++from lxml import etree ++ ++from unittest import TestCase ++ ++from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.assertions import ( ++ assert_raise_library_error, ++ assert_xml_equal, ++) ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor ++ ++from pcs.common import report_codes ++from pcs.lib.errors import ReportItemSeverity as Severities ++from pcs.lib.env import LibraryEnvironment ++from pcs.lib.external import CommandRunner ++ ++import pcs.lib.commands.alert as cmd_alert ++ ++ ++@mock.patch("pcs.lib.cib.tools.upgrade_cib") ++class CreateAlertTest(TestCase): ++ def setUp(self): ++ self.mock_log = mock.MagicMock(spec_set=logging.Logger) ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_env = LibraryEnvironment( ++ self.mock_log, self.mock_rep, cib_data="<cib/>" ++ ) ++ ++ def test_no_path(self, mock_upgrade_cib): ++ assert_raise_library_error( ++ lambda: cmd_alert.create_alert( ++ self.mock_env, None, None, None, None ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.REQUIRED_OPTION_IS_MISSING, ++ {"option_name": "path"} ++ ) ++ ) ++ self.assertEqual(0, mock_upgrade_cib.call_count) ++ ++ def test_upgrade_needed(self, mock_upgrade_cib): ++ self.mock_env._push_cib_xml( ++ """ ++ <cib validate-with="pacemaker-2.4.1"> ++ <configuration> ++ </configuration> ++ </cib> ++ """ ++ ) ++ mock_upgrade_cib.return_value = etree.XML( ++ """ ++ <cib validate-with="pacemaker-2.5.0"> ++ <configuration> ++ </configuration> ++ </cib> ++ """ ++ ) ++ 
cmd_alert.create_alert( ++ self.mock_env, ++ "my-alert", ++ "/my/path", ++ { ++ "instance": "value", ++ "another": "val" ++ }, ++ {"meta1": "val1"}, ++ "my description" ++ ) ++ assert_xml_equal( ++ """ ++<cib validate-with="pacemaker-2.5.0"> ++ <configuration> ++ <alerts> ++ <alert id="my-alert" path="/my/path" description="my description"> ++ <meta_attributes id="my-alert-meta_attributes"> ++ <nvpair ++ id="my-alert-meta_attributes-meta1" ++ name="meta1" ++ value="val1" ++ /> ++ </meta_attributes> ++ <instance_attributes id="my-alert-instance_attributes"> ++ <nvpair ++ id="my-alert-instance_attributes-another" ++ name="another" ++ value="val" ++ /> ++ <nvpair ++ id="my-alert-instance_attributes-instance" ++ name="instance" ++ value="value" ++ /> ++ </instance_attributes> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ self.assertEqual(1, mock_upgrade_cib.call_count) ++ ++ ++class UpdateAlertTest(TestCase): ++ def setUp(self): ++ self.mock_log = mock.MagicMock(spec_set=logging.Logger) ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_env = LibraryEnvironment( ++ self.mock_log, self.mock_rep, cib_data="<cib/>" ++ ) ++ ++ def test_update_all(self): ++ self.mock_env._push_cib_xml( ++ """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="my-alert" path="/my/path" description="my description"> ++ <instance_attributes id="my-alert-instance_attributes"> ++ <nvpair ++ id="my-alert-instance_attributes-instance" ++ name="instance" ++ value="value" ++ /> ++ <nvpair ++ id="my-alert-instance_attributes-another" ++ name="another" ++ value="val" ++ /> ++ </instance_attributes> ++ <meta_attributes id="my-alert-meta_attributes"> ++ <nvpair ++ id="my-alert-meta_attributes-meta1" ++ name="meta1" ++ value="val1" ++ /> ++ </meta_attributes> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """ ++ ) ++ cmd_alert.update_alert( 
++ self.mock_env, ++ "my-alert", ++ "/another/one", ++ { ++ "instance": "", ++ "my-attr": "its_val" ++ }, ++ {"meta1": "val2"}, ++ "" ++ ) ++ assert_xml_equal( ++ """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="my-alert" path="/another/one"> ++ <instance_attributes id="my-alert-instance_attributes"> ++ <nvpair ++ id="my-alert-instance_attributes-another" ++ name="another" ++ value="val" ++ /> ++ <nvpair ++ id="my-alert-instance_attributes-my-attr" ++ name="my-attr" ++ value="its_val" ++ /> ++ </instance_attributes> ++ <meta_attributes id="my-alert-meta_attributes"> ++ <nvpair ++ id="my-alert-meta_attributes-meta1" ++ name="meta1" ++ value="val2" ++ /> ++ </meta_attributes> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ ++ def test_update_instance_attribute(self): ++ self.mock_env._push_cib_xml( ++ """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="my-alert" path="/my/path" description="my description"> ++ <instance_attributes id="my-alert-instance_attributes"> ++ <nvpair ++ id="my-alert-instance_attributes-instance" ++ name="instance" ++ value="value" ++ /> ++ </instance_attributes> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """ ++ ) ++ cmd_alert.update_alert( ++ self.mock_env, ++ "my-alert", ++ None, ++ {"instance": "new_val"}, ++ {}, ++ None ++ ) ++ assert_xml_equal( ++ """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="my-alert" path="/my/path" description="my description"> ++ <instance_attributes id="my-alert-instance_attributes"> ++ <nvpair ++ id="my-alert-instance_attributes-instance" ++ name="instance" ++ value="new_val" ++ /> ++ </instance_attributes> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ ++ def test_alert_doesnt_exist(self): ++ self.mock_env._push_cib_xml( ++ """ ++ <cib validate-with="pacemaker-2.5"> ++ 
<configuration> ++ <alerts> ++ <alert id="alert" path="path"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ ) ++ assert_raise_library_error( ++ lambda: cmd_alert.update_alert( ++ self.mock_env, "unknown", "test", {}, {}, None ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "unknown"} ++ ) ++ ) ++ ++ ++class RemoveAlertTest(TestCase): ++ def setUp(self): ++ self.mock_log = mock.MagicMock(spec_set=logging.Logger) ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_rep = MockLibraryReportProcessor() ++ cib = """ ++ <cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"/> ++ <alert id="alert-1" path="/path"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ self.mock_env = LibraryEnvironment( ++ self.mock_log, self.mock_rep, cib_data=cib ++ ) ++ ++ def test_success(self): ++ cmd_alert.remove_alert(self.mock_env, "alert") ++ assert_xml_equal( ++ """ ++ <cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert-1" path="/path"/> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ ++ def test_not_existing_alert(self): ++ assert_raise_library_error( ++ lambda: cmd_alert.remove_alert(self.mock_env, "unknown"), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "unknown"} ++ ) ++ ) ++ ++ ++class AddRecipientTest(TestCase): ++ def setUp(self): ++ self.mock_log = mock.MagicMock(spec_set=logging.Logger) ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_rep = MockLibraryReportProcessor() ++ cib = """ ++ <cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"> ++ <recipient id="alert-recipient" value="value1"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ self.mock_env = LibraryEnvironment( ++ self.mock_log, self.mock_rep, cib_data=cib ++ ) ++ ++ def test_alert_not_found(self): ++ 
assert_raise_library_error( ++ lambda: cmd_alert.add_recipient( ++ self.mock_env, "unknown", "recipient", {}, {} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "unknown"} ++ ) ++ ) ++ ++ def test_value_not_defined(self): ++ assert_raise_library_error( ++ lambda: cmd_alert.add_recipient( ++ self.mock_env, "unknown", "", {}, {} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.REQUIRED_OPTION_IS_MISSING, ++ {"option_name": "value"} ++ ) ++ ) ++ ++ def test_recipient_already_exists(self): ++ assert_raise_library_error( ++ lambda: cmd_alert.add_recipient( ++ self.mock_env, "alert", "value1", {}, {} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ { ++ "recipient": "value1", ++ "alert": "alert" ++ } ++ ) ++ ) ++ ++ def test_success(self): ++ cmd_alert.add_recipient( ++ self.mock_env, ++ "alert", ++ "value", ++ {"attr1": "val1"}, ++ { ++ "attr2": "val2", ++ "attr1": "val1" ++ } ++ ) ++ assert_xml_equal( ++ """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"> ++ <recipient id="alert-recipient" value="value1"/> ++ <recipient id="alert-recipient-1" value="value"> ++ <meta_attributes ++ id="alert-recipient-1-meta_attributes" ++ > ++ <nvpair ++ id="alert-recipient-1-meta_attributes-attr1" ++ name="attr1" ++ value="val1" ++ /> ++ <nvpair ++ id="alert-recipient-1-meta_attributes-attr2" ++ name="attr2" ++ value="val2" ++ /> ++ </meta_attributes> ++ <instance_attributes ++ id="alert-recipient-1-instance_attributes" ++ > ++ <nvpair ++ id="alert-recipient-1-instance_attributes-attr1" ++ name="attr1" ++ value="val1" ++ /> ++ </instance_attributes> ++ </recipient> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ ++ ++class UpdateRecipientTest(TestCase): ++ def setUp(self): ++ self.mock_log = mock.MagicMock(spec_set=logging.Logger) ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_rep = 
MockLibraryReportProcessor() ++ cib = """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"> ++ <recipient id="alert-recipient" value="value1"/> ++ <recipient id="alert-recipient-1" value="value" description="d"> ++ <meta_attributes ++ id="alert-recipient-1-meta_attributes" ++ > ++ <nvpair ++ id="alert-recipient-1-meta_attributes-attr1" ++ name="attr1" ++ value="val1" ++ /> ++ <nvpair ++ id="alert-recipient-1-meta_attributes-attr2" ++ name="attr2" ++ value="val2" ++ /> ++ </meta_attributes> ++ <instance_attributes ++ id="alert-recipient-1-instance_attributes" ++ > ++ <nvpair ++ id="alert-recipient-1-instance_attributes-attr1" ++ name="attr1" ++ value="val1" ++ /> ++ </instance_attributes> ++ </recipient> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """ ++ self.mock_env = LibraryEnvironment( ++ self.mock_log, self.mock_rep, cib_data=cib ++ ) ++ ++ def test_alert_not_found(self): ++ assert_raise_library_error( ++ lambda: cmd_alert.update_recipient( ++ self.mock_env, "unknown", "recipient", {}, {} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "unknown"} ++ ) ++ ) ++ ++ def test_recipient_not_found(self): ++ assert_raise_library_error( ++ lambda: cmd_alert.update_recipient( ++ self.mock_env, "alert", "recipient", {}, {} ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ { ++ "recipient": "recipient", ++ "alert": "alert" ++ } ++ ) ++ ) ++ ++ def test_update_all(self): ++ cmd_alert.update_recipient( ++ self.mock_env, ++ "alert", ++ "value", ++ {"attr1": "value"}, ++ { ++ "attr1": "", ++ "attr3": "new_val" ++ }, ++ "desc" ++ ) ++ assert_xml_equal( ++ """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"> ++ <recipient id="alert-recipient" value="value1"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value" ++ description="desc" ++ > ++ <meta_attributes ++ 
id="alert-recipient-1-meta_attributes" ++ > ++ <nvpair ++ id="alert-recipient-1-meta_attributes-attr2" ++ name="attr2" ++ value="val2" ++ /> ++ <nvpair ++ id="alert-recipient-1-meta_attributes-attr3" ++ name="attr3" ++ value="new_val" ++ /> ++ </meta_attributes> ++ <instance_attributes ++ id="alert-recipient-1-instance_attributes" ++ > ++ <nvpair ++ id="alert-recipient-1-instance_attributes-attr1" ++ name="attr1" ++ value="value" ++ /> ++ </instance_attributes> ++ </recipient> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ ++ ++class RemoveRecipientTest(TestCase): ++ def setUp(self): ++ self.mock_log = mock.MagicMock(spec_set=logging.Logger) ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_rep = MockLibraryReportProcessor() ++ cib = """ ++ <cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"> ++ <recipient id="alert-recipient" value="value1"/> ++ <recipient id="alert-recipient-1" value="value"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """ ++ self.mock_env = LibraryEnvironment( ++ self.mock_log, self.mock_rep, cib_data=cib ++ ) ++ ++ def test_alert_not_found(self): ++ assert_raise_library_error( ++ lambda: cmd_alert.remove_recipient( ++ self.mock_env, "unknown", "recipient" ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_NOT_FOUND, ++ {"alert": "unknown"} ++ ) ++ ) ++ ++ def test_recipient_not_found(self): ++ assert_raise_library_error( ++ lambda: cmd_alert.remove_recipient( ++ self.mock_env, "alert", "recipient" ++ ), ++ ( ++ Severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ { ++ "recipient": "recipient", ++ "alert": "alert" ++ } ++ ) ++ ) ++ ++ def test_success(self): ++ cmd_alert.remove_recipient(self.mock_env, "alert", "value1") ++ assert_xml_equal( ++ """ ++ <cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"> ++ <recipient id="alert-recipient-1" 
value="value"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ ++ ++@mock.patch("pcs.lib.cib.alert.get_all_alerts") ++class GetAllAlertsTest(TestCase): ++ def setUp(self): ++ self.mock_log = mock.MagicMock(spec_set=logging.Logger) ++ self.mock_run = mock.MagicMock(spec_set=CommandRunner) ++ self.mock_rep = MockLibraryReportProcessor() ++ self.mock_env = LibraryEnvironment( ++ self.mock_log, self.mock_rep, cib_data='<cib/>' ++ ) ++ ++ def test_success(self, mock_alerts): ++ mock_alerts.return_value = [{"id": "alert"}] ++ self.assertEqual( ++ [{"id": "alert"}], ++ cmd_alert.get_all_alerts(self.mock_env) ++ ) ++ self.assertEqual(1, mock_alerts.call_count) +diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py +index a22a014..751001b 100644 +--- a/pcs/lib/commands/test/test_ticket.py ++++ b/pcs/lib/commands/test/test_ticket.py +@@ -44,7 +44,7 @@ class CreateTest(TestCase): + }) + + assert_xml_equal( +- env.get_cib_xml(), ++ env._get_cib_xml(), + str(cib.append_to_first_tag_name( + 'constraints', """ + <rsc_ticket +diff --git a/pcs/lib/env.py b/pcs/lib/env.py +index 99e3397..1151891 100644 +--- a/pcs/lib/env.py ++++ b/pcs/lib/env.py +@@ -27,6 +27,7 @@ from pcs.lib.pacemaker import ( + get_cib_xml, + replace_cib_configuration_xml, + ) ++from pcs.lib.cib.tools import ensure_cib_version + + + class LibraryEnvironment(object): +@@ -54,6 +55,7 @@ class LibraryEnvironment(object): + # related code currently - it's in pcsd + self._auth_tokens_getter = auth_tokens_getter + self._auth_tokens = None ++ self._cib_upgraded = False + + @property + def logger(self): +@@ -77,27 +79,45 @@ class LibraryEnvironment(object): + self._is_cman_cluster = is_cman_cluster(self.cmd_runner()) + return self._is_cman_cluster + +- def get_cib_xml(self): ++ @property ++ def cib_upgraded(self): ++ return self._cib_upgraded ++ ++ def _get_cib_xml(self): + if self.is_cib_live: + return 
get_cib_xml(self.cmd_runner()) + else: + return self._cib_data + +- def get_cib(self): +- return get_cib(self.get_cib_xml()) ++ def get_cib(self, minimal_version=None): ++ cib = get_cib(self._get_cib_xml()) ++ if minimal_version is not None: ++ upgraded_cib = ensure_cib_version( ++ self.cmd_runner(), cib, minimal_version ++ ) ++ if upgraded_cib is not None: ++ cib = upgraded_cib ++ self._cib_upgraded = True ++ return cib + +- def push_cib_xml(self, cib_data): ++ def _push_cib_xml(self, cib_data): + if self.is_cib_live: +- replace_cib_configuration_xml(self.cmd_runner(), cib_data) ++ replace_cib_configuration_xml( ++ self.cmd_runner(), cib_data, self._cib_upgraded ++ ) ++ if self._cib_upgraded: ++ self._cib_upgraded = False ++ self.report_processor.process(reports.cib_upgrade_successful()) + else: + self._cib_data = cib_data + ++ + def push_cib(self, cib): + #etree returns bytes: b'xml' + #python 3 removed .encode() from bytes + #run(...) calls subprocess.Popen.communicate which calls encode... 
+ #so here is bytes to str conversion +- self.push_cib_xml(etree.tostring(cib).decode()) ++ self._push_cib_xml(etree.tostring(cib).decode()) + + @property + def is_cib_live(self): +diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker.py +index 14745c5..fd6f97b 100644 +--- a/pcs/lib/pacemaker.py ++++ b/pcs/lib/pacemaker.py +@@ -55,24 +55,21 @@ def get_cib(xml): + except (etree.XMLSyntaxError, etree.DocumentInvalid): + raise LibraryError(reports.cib_load_error_invalid_format()) + +-def replace_cib_configuration_xml(runner, xml): +- output, retval = runner.run( +- [ +- __exec("cibadmin"), +- "--replace", "--scope", "configuration", "--verbose", "--xml-pipe" +- ], +- stdin_string=xml +- ) ++def replace_cib_configuration_xml(runner, xml, cib_upgraded=False): ++ cmd = [__exec("cibadmin"), "--replace", "--verbose", "--xml-pipe"] ++ if not cib_upgraded: ++ cmd += ["--scope", "configuration"] ++ output, retval = runner.run(cmd, stdin_string=xml) + if retval != 0: + raise LibraryError(reports.cib_push_error(retval, output)) + +-def replace_cib_configuration(runner, tree): ++def replace_cib_configuration(runner, tree, cib_upgraded=False): + #etree returns bytes: b'xml' + #python 3 removed .encode() from bytes + #run(...) calls subprocess.Popen.communicate which calls encode... + #so here is bytes to str conversion + xml = etree.tostring(tree).decode() +- return replace_cib_configuration_xml(runner, xml) ++ return replace_cib_configuration_xml(runner, xml, cib_upgraded) + + def get_local_node_status(runner): + try: +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index 4f4f580..490b4ff 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -1436,3 +1436,94 @@ def cluster_restart_required_to_apply_changes(): + report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES, + "Cluster restart is required in order to apply these changes." + ) ++ ++ ++def cib_alert_recipient_already_exists(alert_id, recipient_value): ++ """ ++ Error that recipient already exists. 
++ ++ alert_id -- id of alert to which recipient belongs ++ recipient_value -- value of recipient ++ """ ++ return ReportItem.error( ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ "Recipient '{recipient}' in alert '{alert}' already exists.", ++ info={ ++ "recipient": recipient_value, ++ "alert": alert_id ++ } ++ ) ++ ++ ++def cib_alert_recipient_not_found(alert_id, recipient_value): ++ """ ++ Specified recipient not found. ++ ++ alert_id -- id of alert to which recipient should belong ++ recipient_value -- recipient value ++ """ ++ return ReportItem.error( ++ report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ "Recipient '{recipient}' not found in alert '{alert}'.", ++ info={ ++ "recipient": recipient_value, ++ "alert": alert_id ++ } ++ ) ++ ++ ++def cib_alert_not_found(alert_id): ++ """ ++ Alert with specified id doesn't exist. ++ ++ alert_id -- id of alert ++ """ ++ return ReportItem.error( ++ report_codes.CIB_ALERT_NOT_FOUND, ++ "Alert '{alert}' not found.", ++ info={"alert": alert_id} ++ ) ++ ++ ++def cib_upgrade_successful(): ++ """ ++ Upgrade of CIB schema was successful. ++ """ ++ return ReportItem.info( ++ report_codes.CIB_UPGRADE_SUCCESSFUL, ++ "CIB has been upgraded to the latest schema version." ++ ) ++ ++ ++def cib_upgrade_failed(reason): ++ """ ++ Upgrade of CIB schema failed. ++ ++ reason -- reason of failure ++ """ ++ return ReportItem.error( ++ report_codes.CIB_UPGRADE_FAILED, ++ "Upgrading of CIB to the latest schema failed: {reason}", ++ info={"reason": reason} ++ ) ++ ++ ++def unable_to_upgrade_cib_to_required_version( ++ current_version, required_version ++): ++ """ ++ Unable to upgrade CIB to minimal required schema version. ++ ++ current_version -- current version of CIB schema ++ required_version -- required version of CIB schema ++ """ ++ return ReportItem.error( ++ report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION, ++ "Unable to upgrade CIB to required schema version {required_version} " ++ "or higher. 
Current version is {current_version}. Newer version of " ++ "pacemaker is needed.", ++ info={ ++ "required_version": "{0}.{1}.{2}".format(*required_version), ++ "current_version": "{0}.{1}.{2}".format(*current_version) ++ } ++ ) +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 0e230b7..425b613 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -56,6 +56,9 @@ Manage pcs daemon. + .TP + node + Manage cluster nodes. ++.TP ++alert ++Manage pacemaker alerts. + .SS "resource" + .TP + [show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR] +@@ -635,6 +638,28 @@ Remove node from standby mode (the node specified will now be able to host resou + .TP + utilization [<node> [<name>=<value> ...]] + Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram= ++.SS "alert" ++.TP ++[config|show] ++Show all configured alerts. ++.TP ++create path=<path> [id=<alert\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...] ++Create new alert with specified path. Id will be automatically generated if it is not specified. ++.TP ++update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...] ++Update existing alert with specified id. ++.TP ++remove <alert\-id> ++Remove alert with specified id. ++.TP ++recipient add <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...] ++Add new recipient to specified alert. ++.TP ++recipient update <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...] 
++Update existing recipient identified by alert and it's value. ++.TP ++recipient remove <alert\-id> <recipient\-value> ++Remove specified recipient. + .SH EXAMPLES + .TP + Show all resources +diff --git a/pcs/test/resources/cib-empty-2.5.xml b/pcs/test/resources/cib-empty-2.5.xml +new file mode 100644 +index 0000000..1b4fb0a +--- /dev/null ++++ b/pcs/test/resources/cib-empty-2.5.xml +@@ -0,0 +1,10 @@ ++<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-2.5" crm_feature_set="3.0.9" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2"> ++ <configuration> ++ <crm_config/> ++ <nodes> ++ </nodes> ++ <resources/> ++ <constraints/> ++ </configuration> ++ <status/> ++</cib> +diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py +new file mode 100644 +index 0000000..905dc9f +--- /dev/null ++++ b/pcs/test/test_alert.py +@@ -0,0 +1,363 @@ ++ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import shutil ++import sys ++ ++from pcs.test.tools.misc import ( ++ get_test_resource as rc, ++ is_minimum_pacemaker_version, ++) ++from pcs.test.tools.assertions import AssertPcsMixin ++from pcs.test.tools.pcs_runner import PcsRunner ++ ++major, minor = sys.version_info[:2] ++if major == 2 and minor == 6: ++ import unittest2 as unittest ++else: ++ import unittest ++ ++ ++old_cib = rc("cib-empty.xml") ++empty_cib = rc("cib-empty-2.5.xml") ++temp_cib = rc("temp-cib.xml") ++ ++ ++ALERTS_SUPPORTED = is_minimum_pacemaker_version(1, 1, 15) ++ALERTS_NOT_SUPPORTED_MSG = "Pacemaker version is too old (must be >= 1.1.15)" +\ ++ " to test alerts" ++ ++ ++class PcsAlertTest(unittest.TestCase, AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(empty_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ ++@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) ++class AlertCibUpgradeTest(unittest.TestCase, AssertPcsMixin): ++ def 
setUp(self): ++ shutil.copy(old_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ def test_cib_upgrade(self): ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ No alerts defined ++""" ++ ) ++ ++ self.assert_pcs_success( ++ "alert create path=test", ++ "CIB has been upgraded to the latest schema version.\n" ++ ) ++ ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++""" ++ ) ++ ++ ++@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) ++class CreateAlertTest(PcsAlertTest): ++ def test_create_multiple_without_id(self): ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ No alerts defined ++""" ++ ) ++ ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert create path=test2") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Alert: alert-1 (path=test) ++ Alert: alert-2 (path=test2) ++""" ++ ) ++ ++ def test_create_multiple_with_id(self): ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ No alerts defined ++""" ++ ) ++ self.assert_pcs_success("alert create id=alert1 path=test") ++ self.assert_pcs_success( ++ "alert create id=alert2 description=desc path=test" ++ ) ++ self.assert_pcs_success( ++ "alert create description=desc2 path=test2 id=alert3" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert1 (path=test) ++ Alert: alert2 (path=test) ++ Description: desc ++ Alert: alert3 (path=test2) ++ Description: desc2 ++""" ++ ) ++ ++ def test_create_with_options(self): ++ self.assert_pcs_success( ++ "alert create id=alert1 description=desc path=test " ++ "options opt1=val1 opt2=val2 meta m1=v1 m2=v2" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert1 (path=test) ++ Description: desc ++ Options: opt1=val1 opt2=val2 ++ Meta options: m1=v1 m2=v2 ++""" ++ ) ++ ++ 
def test_already_exists(self): ++ self.assert_pcs_success("alert create id=alert1 path=test") ++ self.assert_pcs_fail( ++ "alert create id=alert1 path=test", ++ "Error: 'alert1' already exists\n" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert1 (path=test) ++""" ++ ) ++ ++ def test_path_is_required(self): ++ self.assert_pcs_fail( ++ "alert create id=alert1", ++ "Error: required option 'path' is missing\n" ++ ) ++ ++ ++@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) ++class UpdateAlertTest(PcsAlertTest): ++ def test_update_everything(self): ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ No alerts defined ++""" ++ ) ++ self.assert_pcs_success( ++ "alert create id=alert1 description=desc path=test " ++ "options opt1=val1 opt2=val2 meta m1=v1 m2=v2" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert1 (path=test) ++ Description: desc ++ Options: opt1=val1 opt2=val2 ++ Meta options: m1=v1 m2=v2 ++""" ++ ) ++ self.assert_pcs_success( ++ "alert update alert1 description=new_desc path=/new/path " ++ "options opt1= opt2=test opt3=1 meta m1= m2=v m3=3" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert1 (path=/new/path) ++ Description: new_desc ++ Options: opt2=test opt3=1 ++ Meta options: m2=v m3=3 ++""" ++ ) ++ ++ def test_not_existing_alert(self): ++ self.assert_pcs_fail( ++ "alert update alert1", "Error: Alert 'alert1' not found.\n" ++ ) ++ ++ ++@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) ++class RemoveAlertTest(PcsAlertTest): ++ def test_not_existing_alert(self): ++ self.assert_pcs_fail( ++ "alert remove alert1", "Error: Alert 'alert1' not found.\n" ++ ) ++ ++ def test_success(self): ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ No alerts defined ++""" ++ ) ++ ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: 
alert (path=test) ++""" ++ ) ++ self.assert_pcs_success("alert remove alert") ++ ++ ++@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) ++class AddRecipientTest(PcsAlertTest): ++ def test_success(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++""" ++ ) ++ self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: rec_value ++""" ++ ) ++ self.assert_pcs_success( ++ "alert recipient add alert rec_value2 description=description " ++ "options o1=1 o2=2 meta m1=v1 m2=v2" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: rec_value ++ Recipient: rec_value2 ++ Description: description ++ Options: o1=1 o2=2 ++ Meta options: m1=v1 m2=v2 ++""" ++ ) ++ ++ def test_no_alert(self): ++ self.assert_pcs_fail( ++ "alert recipient add alert rec_value", ++ "Error: Alert 'alert' not found.\n" ++ ) ++ ++ def test_already_exists(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_fail( ++ "alert recipient add alert rec_value", ++ "Error: Recipient 'rec_value' in alert 'alert' already exists.\n" ++ ) ++ ++ ++@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) ++class UpdateRecipientAlert(PcsAlertTest): ++ def test_success(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success( ++ "alert recipient add alert rec_value description=description " ++ "options o1=1 o2=2 meta m1=v1 m2=v2" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: rec_value ++ Description: description ++ Options: o1=1 o2=2 ++ Meta options: m1=v1 m2=v2 ++""" ++ ) ++ self.assert_pcs_success( ++ 
"alert recipient update alert rec_value description=desc " ++ "options o1= o2=v2 o3=3 meta m1= m2=2 m3=3" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: rec_value ++ Description: desc ++ Options: o2=v2 o3=3 ++ Meta options: m2=2 m3=3 ++""" ++ ) ++ ++ def test_no_alert(self): ++ self.assert_pcs_fail( ++ "alert recipient update alert rec_value description=desc", ++ "Error: Alert 'alert' not found.\n" ++ ) ++ ++ def test_no_recipient(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_fail( ++ "alert recipient update alert rec_value description=desc", ++ "Error: Recipient 'rec_value' not found in alert 'alert'.\n" ++ ) ++ ++ ++@unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) ++class RemoveRecipientTest(PcsAlertTest): ++ def test_success(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: rec_value ++""" ++ ) ++ self.assert_pcs_success("alert recipient remove alert rec_value") ++ ++ def test_no_alert(self): ++ self.assert_pcs_fail( ++ "alert recipient remove alert rec_value", ++ "Error: Alert 'alert' not found.\n" ++ ) ++ ++ def test_no_recipient(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_fail( ++ "alert recipient remove alert rec_value", ++ "Error: Recipient 'rec_value' not found in alert 'alert'.\n" ++ ) +diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py +index 405a270..1149a3f 100644 +--- a/pcs/test/test_lib_cib_tools.py ++++ b/pcs/test/test_lib_cib_tools.py +@@ -7,12 +7,18 @@ from __future__ import ( + + from unittest import TestCase + +-from pcs.test.tools.assertions import assert_raise_library_error ++from lxml import etree ++ ++from pcs.test.tools.assertions import ( ++ 
assert_raise_library_error, ++ assert_xml_equal, ++) + from pcs.test.tools.misc import get_test_resource as rc + from pcs.test.tools.pcs_mock import mock + from pcs.test.tools.xml import get_xml_manipulation_creator_from_file + + from pcs.common import report_codes ++from pcs.lib.external import CommandRunner + from pcs.lib.errors import ReportItemSeverity as severities + + from pcs.lib.cib import tools as lib +@@ -145,3 +151,176 @@ class ValidateIdDoesNotExistsTest(TestCase): + ), + ) + does_id_exists.assert_called_once_with("tree", "some-id") ++ ++ ++class GetSubElementTest(TestCase): ++ def setUp(self): ++ self.root = etree.Element("root") ++ self.sub = etree.SubElement(self.root, "sub_element") ++ ++ def test_sub_element_exists(self): ++ self.assertEqual( ++ self.sub, lib.get_sub_element(self.root, "sub_element") ++ ) ++ ++ def test_new_no_id(self): ++ assert_xml_equal( ++ '<new_element/>', ++ etree.tostring( ++ lib.get_sub_element(self.root, "new_element") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <root> ++ <sub_element/> ++ <new_element/> ++ </root> ++ """, ++ etree.tostring(self.root).decode() ++ ) ++ ++ def test_new_with_id(self): ++ assert_xml_equal( ++ '<new_element id="new_id"/>', ++ etree.tostring( ++ lib.get_sub_element(self.root, "new_element", "new_id") ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <root> ++ <sub_element/> ++ <new_element id="new_id"/> ++ </root> ++ """, ++ etree.tostring(self.root).decode() ++ ) ++ ++ def test_new_first(self): ++ lib.get_sub_element(self.root, "new_element", "new_id", 0) ++ assert_xml_equal( ++ """ ++ <root> ++ <new_element id="new_id"/> ++ <sub_element/> ++ </root> ++ """, ++ etree.tostring(self.root).decode() ++ ) ++ ++ def test_new_last(self): ++ lib.get_sub_element(self.root, "new_element", "new_id", None) ++ assert_xml_equal( ++ """ ++ <root> ++ <sub_element/> ++ <new_element id="new_id"/> ++ </root> ++ """, ++ etree.tostring(self.root).decode() ++ ) ++ ++ ++class 
GetPacemakerVersionByWhichCibWasValidatedTest(TestCase): ++ def test_missing_attribute(self): ++ assert_raise_library_error( ++ lambda: lib.get_pacemaker_version_by_which_cib_was_validated( ++ etree.XML("<cib/>") ++ ), ++ ( ++ severities.ERROR, ++ report_codes.CIB_LOAD_ERROR_BAD_FORMAT, ++ {} ++ ) ++ ) ++ ++ def test_invalid_version(self): ++ assert_raise_library_error( ++ lambda: lib.get_pacemaker_version_by_which_cib_was_validated( ++ etree.XML('<cib validate-with="something-1.2.3"/>') ++ ), ++ ( ++ severities.ERROR, ++ report_codes.CIB_LOAD_ERROR_BAD_FORMAT, ++ {} ++ ) ++ ) ++ ++ def test_no_revision(self): ++ self.assertEqual( ++ (1, 2, 0), ++ lib.get_pacemaker_version_by_which_cib_was_validated( ++ etree.XML('<cib validate-with="pacemaker-1.2"/>') ++ ) ++ ) ++ ++ def test_with_revision(self): ++ self.assertEqual( ++ (1, 2, 3), ++ lib.get_pacemaker_version_by_which_cib_was_validated( ++ etree.XML('<cib validate-with="pacemaker-1.2.3"/>') ++ ) ++ ) ++ ++ ++@mock.patch("pcs.lib.cib.tools.upgrade_cib") ++class EnsureCibVersionTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ self.cib = etree.XML('<cib validate-with="pacemaker-2.3.4"/>') ++ ++ def test_same_version(self, mock_upgrade_cib): ++ self.assertTrue( ++ lib.ensure_cib_version( ++ self.mock_runner, self.cib, (2, 3, 4) ++ ) is None ++ ) ++ self.assertEqual(0, mock_upgrade_cib.run.call_count) ++ ++ def test_higher_version(self, mock_upgrade_cib): ++ self.assertTrue( ++ lib.ensure_cib_version( ++ self.mock_runner, self.cib, (2, 3, 3) ++ ) is None ++ ) ++ self.assertEqual(0, mock_upgrade_cib.call_count) ++ ++ def test_upgraded_same_version(self, mock_upgrade_cib): ++ upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.5"/>') ++ mock_upgrade_cib.return_value = upgraded_cib ++ self.assertEqual( ++ upgraded_cib, ++ lib.ensure_cib_version( ++ self.mock_runner, self.cib, (2, 3, 5) ++ ) ++ ) ++ mock_upgrade_cib.assert_called_once_with(self.cib, 
self.mock_runner) ++ ++ def test_upgraded_higher_version(self, mock_upgrade_cib): ++ upgraded_cib = etree.XML('<cib validate-with="pacemaker-2.3.6"/>') ++ mock_upgrade_cib.return_value = upgraded_cib ++ self.assertEqual( ++ upgraded_cib, ++ lib.ensure_cib_version( ++ self.mock_runner, self.cib, (2, 3, 5) ++ ) ++ ) ++ mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner) ++ ++ def test_upgraded_lower_version(self, mock_upgrade_cib): ++ mock_upgrade_cib.return_value = self.cib ++ assert_raise_library_error( ++ lambda: lib.ensure_cib_version( ++ self.mock_runner, self.cib, (2, 3, 5) ++ ), ++ ( ++ severities.ERROR, ++ report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION, ++ { ++ "required_version": "2.3.5", ++ "current_version": "2.3.4" ++ } ++ ) ++ ) ++ mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner) +diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py +index fbaac09..95f7a00 100644 +--- a/pcs/test/test_lib_env.py ++++ b/pcs/test/test_lib_env.py +@@ -7,8 +7,13 @@ from __future__ import ( + + from unittest import TestCase + import logging ++from lxml import etree + +-from pcs.test.tools.assertions import assert_raise_library_error ++from pcs.test.tools.assertions import ( ++ assert_raise_library_error, ++ assert_xml_equal, ++ assert_report_item_list_equal, ++) + from pcs.test.tools.custom_mock import MockLibraryReportProcessor + from pcs.test.tools.misc import get_test_resource as rc + from pcs.test.tools.pcs_mock import mock +@@ -82,13 +87,13 @@ class LibraryEnvironmentTest(TestCase): + + self.assertFalse(env.is_cib_live) + +- self.assertEqual(cib_data, env.get_cib_xml()) ++ self.assertEqual(cib_data, env._get_cib_xml()) + self.assertEqual(0, mock_get_cib.call_count) + +- env.push_cib_xml(new_cib_data) ++ env._push_cib_xml(new_cib_data) + self.assertEqual(0, mock_push_cib.call_count) + +- self.assertEqual(new_cib_data, env.get_cib_xml()) ++ self.assertEqual(new_cib_data, env._get_cib_xml()) + self.assertEqual(0, 
mock_get_cib.call_count) + + @mock.patch("pcs.lib.env.replace_cib_configuration_xml") +@@ -101,12 +106,135 @@ class LibraryEnvironmentTest(TestCase): + + self.assertTrue(env.is_cib_live) + +- self.assertEqual(cib_data, env.get_cib_xml()) ++ self.assertEqual(cib_data, env._get_cib_xml()) + self.assertEqual(1, mock_get_cib.call_count) + +- env.push_cib_xml(new_cib_data) ++ env._push_cib_xml(new_cib_data) + self.assertEqual(1, mock_push_cib.call_count) + ++ @mock.patch("pcs.lib.env.ensure_cib_version") ++ @mock.patch("pcs.lib.env.get_cib_xml") ++ def test_get_cib_no_version_live( ++ self, mock_get_cib_xml, mock_ensure_cib_version ++ ): ++ mock_get_cib_xml.return_value = '<cib/>' ++ env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode()) ++ self.assertEqual(1, mock_get_cib_xml.call_count) ++ self.assertEqual(0, mock_ensure_cib_version.call_count) ++ self.assertFalse(env.cib_upgraded) ++ ++ @mock.patch("pcs.lib.env.ensure_cib_version") ++ @mock.patch("pcs.lib.env.get_cib_xml") ++ def test_get_cib_upgrade_live( ++ self, mock_get_cib_xml, mock_ensure_cib_version ++ ): ++ mock_get_cib_xml.return_value = '<cib/>' ++ mock_ensure_cib_version.return_value = etree.XML('<new_cib/>') ++ env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ assert_xml_equal( ++ '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode() ++ ) ++ self.assertEqual(1, mock_get_cib_xml.call_count) ++ self.assertEqual(1, mock_ensure_cib_version.call_count) ++ self.assertTrue(env.cib_upgraded) ++ ++ @mock.patch("pcs.lib.env.ensure_cib_version") ++ @mock.patch("pcs.lib.env.get_cib_xml") ++ def test_get_cib_no_upgrade_live( ++ self, mock_get_cib_xml, mock_ensure_cib_version ++ ): ++ mock_get_cib_xml.return_value = '<cib/>' ++ mock_ensure_cib_version.return_value = None ++ env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ assert_xml_equal( ++ '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode() ++ 
) ++ self.assertEqual(1, mock_get_cib_xml.call_count) ++ self.assertEqual(1, mock_ensure_cib_version.call_count) ++ self.assertFalse(env.cib_upgraded) ++ ++ @mock.patch("pcs.lib.env.ensure_cib_version") ++ @mock.patch("pcs.lib.env.get_cib_xml") ++ def test_get_cib_no_version_file( ++ self, mock_get_cib_xml, mock_ensure_cib_version ++ ): ++ env = LibraryEnvironment( ++ self.mock_logger, self.mock_reporter, cib_data='<cib/>' ++ ) ++ assert_xml_equal('<cib/>', etree.tostring(env.get_cib()).decode()) ++ self.assertEqual(0, mock_get_cib_xml.call_count) ++ self.assertEqual(0, mock_ensure_cib_version.call_count) ++ self.assertFalse(env.cib_upgraded) ++ ++ @mock.patch("pcs.lib.env.ensure_cib_version") ++ @mock.patch("pcs.lib.env.get_cib_xml") ++ def test_get_cib_upgrade_file( ++ self, mock_get_cib_xml, mock_ensure_cib_version ++ ): ++ mock_ensure_cib_version.return_value = etree.XML('<new_cib/>') ++ env = LibraryEnvironment( ++ self.mock_logger, self.mock_reporter, cib_data='<cib/>' ++ ) ++ assert_xml_equal( ++ '<new_cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode() ++ ) ++ self.assertEqual(0, mock_get_cib_xml.call_count) ++ self.assertEqual(1, mock_ensure_cib_version.call_count) ++ self.assertTrue(env.cib_upgraded) ++ ++ @mock.patch("pcs.lib.env.ensure_cib_version") ++ @mock.patch("pcs.lib.env.get_cib_xml") ++ def test_get_cib_no_upgrade_file( ++ self, mock_get_cib_xml, mock_ensure_cib_version ++ ): ++ mock_ensure_cib_version.return_value = None ++ env = LibraryEnvironment( ++ self.mock_logger, self.mock_reporter, cib_data='<cib/>' ++ ) ++ assert_xml_equal( ++ '<cib/>', etree.tostring(env.get_cib((1, 2, 3))).decode() ++ ) ++ self.assertEqual(0, mock_get_cib_xml.call_count) ++ self.assertEqual(1, mock_ensure_cib_version.call_count) ++ self.assertFalse(env.cib_upgraded) ++ ++ @mock.patch("pcs.lib.env.replace_cib_configuration_xml") ++ @mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock cmd runner" ++ ) ++ def 
test_push_cib_not_upgraded_live(self, mock_replace_cib): ++ env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ env.push_cib(etree.XML('<cib/>')) ++ mock_replace_cib.assert_called_once_with( ++ "mock cmd runner", '<cib/>', False ++ ) ++ self.assertEqual([], env.report_processor.report_item_list) ++ ++ @mock.patch("pcs.lib.env.replace_cib_configuration_xml") ++ @mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock cmd runner" ++ ) ++ def test_push_cib_upgraded_live(self, mock_replace_cib): ++ env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ env._cib_upgraded = True ++ env.push_cib(etree.XML('<cib/>')) ++ mock_replace_cib.assert_called_once_with( ++ "mock cmd runner", '<cib/>', True ++ ) ++ assert_report_item_list_equal( ++ env.report_processor.report_item_list, ++ [( ++ severity.INFO, ++ report_codes.CIB_UPGRADE_SUCCESSFUL, ++ {} ++ )] ++ ) ++ + @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes") + @mock.patch("pcs.lib.env.reload_corosync_config") + @mock.patch("pcs.lib.env.distribute_corosync_conf") +diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py +index 85d2034..0edee5c 100644 +--- a/pcs/test/test_lib_pacemaker.py ++++ b/pcs/test/test_lib_pacemaker.py +@@ -206,12 +206,28 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest): + + mock_runner.run.assert_called_once_with( + [ +- self.path("cibadmin"), "--replace", "--scope", "configuration", +- "--verbose", "--xml-pipe" ++ self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe", ++ "--scope", "configuration" + ], + stdin_string=xml + ) + ++ def test_cib_upgraded(self): ++ xml = "<xml/>" ++ expected_output = "expected output" ++ expected_retval = 0 ++ mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ mock_runner.run.return_value = (expected_output, expected_retval) ++ ++ lib.replace_cib_configuration( ++ mock_runner, XmlManipulation.from_str(xml).tree, True ++ ) ++ ++ mock_runner.run.assert_called_once_with( 
++ [self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe"], ++ stdin_string=xml ++ ) ++ + def test_error(self): + xml = "<xml/>" + expected_error = "expected error" +@@ -237,8 +253,8 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest): + + mock_runner.run.assert_called_once_with( + [ +- self.path("cibadmin"), "--replace", "--scope", "configuration", +- "--verbose", "--xml-pipe" ++ self.path("cibadmin"), "--replace", "--verbose", "--xml-pipe", ++ "--scope", "configuration" + ], + stdin_string=xml + ) +diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py +index e8c0813..2fa5088 100644 +--- a/pcs/test/test_resource.py ++++ b/pcs/test/test_resource.py +@@ -1541,6 +1541,9 @@ Ordering Constraints: + Colocation Constraints: + Ticket Constraints: + ++Alerts: ++ No alerts defined ++ + Resources Defaults: + No defaults set + Operations Defaults: +@@ -1704,6 +1707,9 @@ Ordering Constraints: + Colocation Constraints: + Ticket Constraints: + ++Alerts: ++ No alerts defined ++ + Resources Defaults: + No defaults set + Operations Defaults: +diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py +index 479c8e9..a6ee2f5 100644 +--- a/pcs/test/test_stonith.py ++++ b/pcs/test/test_stonith.py +@@ -149,6 +149,9 @@ Ordering Constraints: + Colocation Constraints: + Ticket Constraints: + ++Alerts: ++ No alerts defined ++ + Resources Defaults: + No defaults set + Operations Defaults: +diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py +index 305fe32..78a0787 100644 +--- a/pcs/test/tools/color_text_runner.py ++++ b/pcs/test/tools/color_text_runner.py +@@ -64,6 +64,16 @@ class ColorTextTestResult(TextTestResult): + self.stream.write(apply(["lightred", "bold"], 'F')) + self.stream.flush() + ++ def addSkip(self, test, reason): ++ super(TextTestResult, self).addSkip(test, reason) ++ if self.showAll: ++ self.stream.writeln( ++ apply(["blue", "bold"], "skipped {0!r}".format(reason)) ++ ) ++ elif self.dots: ++ 
self.stream.write(apply(["blue", "bold"], 's')) ++ self.stream.flush() ++ + def getDescription(self, test): + doc_first_line = test.shortDescription() + if self.descriptions and doc_first_line: +diff --git a/pcs/usage.py b/pcs/usage.py +index c4c417a..8ae6839 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -24,6 +24,7 @@ def full_usage(): + out += strip_extras(status([],False)) + out += strip_extras(config([],False)) + out += strip_extras(pcsd([],False)) ++ out += strip_extras(alert([], False)) + print(out.strip()) + print("Examples:\n" + examples.replace(" \ ","")) + +@@ -115,6 +116,7 @@ def generate_completion_tree_from_usage(): + tree["config"] = generate_tree(config([],False)) + tree["pcsd"] = generate_tree(pcsd([],False)) + tree["node"] = generate_tree(node([], False)) ++ tree["alert"] = generate_tree(alert([], False)) + return tree + + def generate_tree(usage_txt): +@@ -169,6 +171,7 @@ Commands: + config View and manage cluster configuration. + pcsd Manage pcs daemon. + node Manage cluster nodes. ++ alert Set pacemaker alerts. + """ + # Advanced usage to possibly add later + # --corosync_conf=<corosync file> Specify alternative corosync.conf file +@@ -1347,9 +1350,49 @@ Commands: + else: + return output + ++ ++def alert(args=[], pout=True): ++ output = """ ++Usage: pcs alert <command> ++Set pacemaker alerts. ++ ++Commands: ++ [config|show] ++ Show all configured alerts. ++ ++ create path=<path> [id=<alert-id>] [description=<description>] ++ [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] ++ Create new alert with specified path. Id will be automatically ++ generated if it is not specified. ++ ++ update <alert-id> [path=<path>] [description=<description>] ++ [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] ++ Update existing alert with specified id. ++ ++ remove <alert-id> ++ Remove alert with specified id. ++ ++ recipient add <alert-id> <recipient-value> [description=<description>] ++ [options [<option>=<value>]...] 
[meta [<meta-option>=<value>]...] ++ Add new recipient to specified alert. ++ ++ recipient update <alert-id> <recipient-value> [description=<description>] ++ [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] ++ Update existing recipient identified by alert and it's value. ++ ++ recipient remove <alert-id> <recipient-value> ++ Remove specified recipient. ++""" ++ if pout: ++ print(sub_usage(args, output)) ++ else: ++ return output ++ ++ + def show(main_usage_name, rest_usage_names): + usage_map = { + "acl": acl, ++ "alert": alert, + "cluster": cluster, + "config": config, + "constraint": constraint, +diff --git a/pcs/utils.py b/pcs/utils.py +index 11bd4cf..f9cdb1c 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1592,7 +1592,7 @@ def is_etree(var): + ) + + # Replace only configuration section of cib with dom passed +-def replace_cib_configuration(dom): ++def replace_cib_configuration(dom, cib_upgraded=False): + if is_etree(dom): + #etree returns string in bytes: b'xml' + #python 3 removed .encode() from byte strings +@@ -1603,7 +1603,12 @@ def replace_cib_configuration(dom): + new_dom = dom.toxml() + else: + new_dom = dom +- output, retval = run(["cibadmin", "--replace", "-o", "configuration", "-V", "--xml-pipe"],False,new_dom) ++ cmd = ["cibadmin", "--replace", "-V", "--xml-pipe"] ++ if cib_upgraded: ++ print("CIB has been upgraded to the latest schema version.") ++ else: ++ cmd += ["-o", "configuration"] ++ output, retval = run(cmd, False, new_dom) + if retval != 0: + err("Unable to update cib\n"+output) + +-- +1.8.3.1 + diff --git a/SOURCES/bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch b/SOURCES/bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch new file mode 100644 index 0000000..2154f0e --- /dev/null +++ b/SOURCES/bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch @@ -0,0 +1,1638 @@ +From 8eef21a7bbfdcba709515529a40fadc1f5386b70 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular 
<omular@redhat.com> +Date: Fri, 8 Jul 2016 16:43:16 +0200 +Subject: [PATCH 1/2] lib: use recipient id as identifier instead of its value + +--- + pcs/common/report_codes.py | 3 +- + pcs/lib/cib/alert.py | 129 +++++++---- + pcs/lib/cib/test/test_alert.py | 449 +++++++++++++++++++++++++++++++----- + pcs/lib/commands/alert.py | 45 +++- + pcs/lib/commands/test/test_alert.py | 111 ++++++--- + pcs/lib/reports.py | 29 ++- + 6 files changed, 597 insertions(+), 169 deletions(-) + +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index 2b39938..53f2ccb 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -7,6 +7,7 @@ from __future__ import ( + + # force cathegories + FORCE_ACTIVE_RRP = "ACTIVE_RRP" ++FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE" + FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE" + FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE" + FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD" +@@ -22,7 +23,7 @@ AGENT_NOT_FOUND = "AGENT_NOT_FOUND" + BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT' + CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND" + CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS" +-CIB_ALERT_RECIPIENT_NOT_FOUND = "CIB_ALERT_RECIPIENT_NOT_FOUND" ++CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID" + CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION" + CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT" + CIB_LOAD_ERROR = "CIB_LOAD_ERROR" +diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py +index 6b72996..b5fe88c 100644 +--- a/pcs/lib/cib/alert.py ++++ b/pcs/lib/cib/alert.py +@@ -7,14 +7,16 @@ from __future__ import ( + + from lxml import etree + ++from pcs.common import report_codes + from pcs.lib import reports +-from pcs.lib.errors import LibraryError ++from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities + from pcs.lib.cib.nvpair import update_nvset, 
get_nvset + from pcs.lib.cib.tools import ( + check_new_id_applicable, + get_sub_element, + find_unique_id, + get_alerts, ++ validate_id_does_not_exist, + ) + + +@@ -61,7 +63,7 @@ def _update_optional_attribute(element, attribute, value): + def get_alert_by_id(tree, alert_id): + """ + Returns alert element with specified id. +- Raises AlertNotFound if alert with specified id doesn't exist. ++ Raises LibraryError if alert with specified id doesn't exist. + + tree -- cib etree node + alert_id -- id of alert +@@ -72,25 +74,53 @@ def get_alert_by_id(tree, alert_id): + return alert + + +-def get_recipient(alert, recipient_value): ++def get_recipient_by_id(tree, recipient_id): + """ + Returns recipient element with value recipient_value which belong to + specified alert. +- Raises RecipientNotFound if recipient doesn't exist. ++ Raises LibraryError if recipient doesn't exist. + +- alert -- parent element of required recipient +- recipient_value -- value of recipient ++ tree -- cib etree node ++ recipient_id -- id of recipient + """ +- recipient = alert.find( +- "./recipient[@value='{0}']".format(recipient_value) ++ recipient = get_alerts(tree).find( ++ "./alert/recipient[@id='{0}']".format(recipient_id) + ) + if recipient is None: +- raise LibraryError(reports.cib_alert_recipient_not_found( +- alert.get("id"), recipient_value +- )) ++ raise LibraryError(reports.id_not_found(recipient_id, "Recipient")) + return recipient + + ++def ensure_recipient_value_is_unique( ++ reporter, alert, recipient_value, recipient_id="", allow_duplicity=False ++): ++ """ ++ Ensures that recipient_value is unique in alert. 
++ ++ reporter -- report processor ++ alert -- alert ++ recipient_value -- recipient value ++ recipient_id -- recipient id of to which value belongs to ++ allow_duplicity -- if True only warning will be shown if value already ++ exists ++ """ ++ recipient_list = alert.xpath( ++ "./recipient[@value='{value}' and @id!='{id}']".format( ++ value=recipient_value, id=recipient_id ++ ) ++ ) ++ if recipient_list: ++ reporter.process(reports.cib_alert_recipient_already_exists( ++ alert.get("id", None), ++ recipient_value, ++ Severities.WARNING if allow_duplicity else Severities.ERROR, ++ forceable=( ++ None if allow_duplicity ++ else report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE ++ ) ++ )) ++ ++ + def create_alert(tree, alert_id, path, description=""): + """ + Create new alert element. Returns newly created element. +@@ -116,7 +146,7 @@ def create_alert(tree, alert_id, path, description=""): + def update_alert(tree, alert_id, path, description=None): + """ + Update existing alert. Return updated alert element. +- Raises AlertNotFound if alert with specified id doesn't exist. ++ Raises LibraryError if alert with specified id doesn't exist. + + tree -- cib etree node + alert_id -- id of alert to be updated +@@ -134,7 +164,7 @@ def update_alert(tree, alert_id, path, description=None): + def remove_alert(tree, alert_id): + """ + Remove alert with specified id. +- Raises AlertNotFound if alert with specified id doesn't exist. ++ Raises LibraryError if alert with specified id doesn't exist. + + tree -- cib etree node + alert_id -- id of alert which should be removed +@@ -144,36 +174,38 @@ def remove_alert(tree, alert_id): + + + def add_recipient( ++ reporter, + tree, + alert_id, + recipient_value, +- description="" ++ recipient_id=None, ++ description="", ++ allow_same_value=False + ): + """ + Add recipient to alert with specified id. Returns added recipient element. +- Raises AlertNotFound if alert with specified id doesn't exist. 
++ Raises LibraryError if alert with specified recipient_id doesn't exist. + Raises LibraryError if recipient already exists. + ++ reporter -- report processor + tree -- cib etree node + alert_id -- id of alert which should be parent of new recipient + recipient_value -- value of recipient ++ recipient_id -- id of new recipient, if None it will be generated + description -- description of recipient ++ allow_same_value -- if True unique recipient value is not required + """ +- alert = get_alert_by_id(tree, alert_id) ++ if recipient_id is None: ++ recipient_id = find_unique_id(tree, "{0}-recipient".format(alert_id)) ++ else: ++ validate_id_does_not_exist(tree, recipient_id) + +- recipient = alert.find( +- "./recipient[@value='{0}']".format(recipient_value) ++ alert = get_alert_by_id(tree, alert_id) ++ ensure_recipient_value_is_unique( ++ reporter, alert, recipient_value, allow_duplicity=allow_same_value + ) +- if recipient is not None: +- raise LibraryError(reports.cib_alert_recipient_already_exists( +- alert_id, recipient_value +- )) +- + recipient = etree.SubElement( +- alert, +- "recipient", +- id=find_unique_id(tree, "{0}-recipient".format(alert_id)), +- value=recipient_value ++ alert, "recipient", id=recipient_id, value=recipient_value + ) + + if description: +@@ -182,38 +214,49 @@ def add_recipient( + return recipient + + +-def update_recipient(tree, alert_id, recipient_value, description): ++def update_recipient( ++ reporter, ++ tree, ++ recipient_id, ++ recipient_value=None, ++ description=None, ++ allow_same_value=False ++): + """ + Update specified recipient. Returns updated recipient element. +- Raises AlertNotFound if alert with specified id doesn't exist. +- Raises RecipientNotFound if recipient doesn't exist. ++ Raises LibraryError if recipient doesn't exist. 
+ ++ reporter -- report processor + tree -- cib etree node +- alert_id -- id of alert, parent element of recipient +- recipient_value -- recipient value ++ recipient_id -- id of recipient to be updated ++ recipient_value -- recipient value, stay unchanged if None + description -- description, if empty it will be removed, stay unchanged + if None ++ allow_same_value -- if True unique recipient value is not required + """ +- recipient = get_recipient( +- get_alert_by_id(tree, alert_id), recipient_value +- ) ++ recipient = get_recipient_by_id(tree, recipient_id) ++ if recipient_value is not None: ++ ensure_recipient_value_is_unique( ++ reporter, ++ recipient.getparent(), ++ recipient_value, ++ recipient_id=recipient_id, ++ allow_duplicity=allow_same_value ++ ) ++ recipient.set("value", recipient_value) + _update_optional_attribute(recipient, "description", description) + return recipient + + +-def remove_recipient(tree, alert_id, recipient_value): ++def remove_recipient(tree, recipient_id): + """ + Remove specified recipient. +- Raises AlertNotFound if alert with specified id doesn't exist. +- Raises RecipientNotFound if recipient doesn't exist. ++ Raises LibraryError if recipient doesn't exist. 
+ + tree -- cib etree node +- alert_id -- id of alert, parent element of recipient +- recipient_value -- recipient value ++ recipient_id -- id of recipient to be removed + """ +- recipient = get_recipient( +- get_alert_by_id(tree, alert_id), recipient_value +- ) ++ recipient = get_recipient_by_id(tree, recipient_id) + recipient.getparent().remove(recipient) + + +diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py +index c387aaf..50eaef6 100644 +--- a/pcs/lib/cib/test/test_alert.py ++++ b/pcs/lib/cib/test/test_alert.py +@@ -15,8 +15,10 @@ from pcs.lib.errors import ReportItemSeverity as severities + from pcs.test.tools.assertions import( + assert_raise_library_error, + assert_xml_equal, ++ assert_report_item_list_equal, + ) + from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.custom_mock import MockLibraryReportProcessor + + + @mock.patch("pcs.lib.cib.alert.update_nvset") +@@ -129,54 +131,146 @@ class GetAlertByIdTest(TestCase): + ) + + +-class GetRecipientTest(TestCase): ++class GetRecipientByIdTest(TestCase): + def setUp(self): + self.xml = etree.XML( + """ +- <alert id="alert-1"> +- <recipient id="rec-1" value="value1"/> +- <recipient id="rec-2" value="value2"/> +- <not_recipient value="value3"/> +- <recipients> +- <recipient id="rec-4" value="value4"/> +- </recipients> +- </alert> ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert-1"> ++ <recipient id="rec-1" value="value1"/> ++ <not_recipient id="rec-3" value="value3"/> ++ <recipients> ++ <recipient id="rec-4" value="value4"/> ++ </recipients> ++ </alert> ++ <recipient id="rec-2" value="value2"/> ++ </alerts> ++ <alert id="alert-2"/> ++ </configuration> ++ </cib> + """ + ) + + def test_exist(self): + assert_xml_equal( +- '<recipient id="rec-2" value="value2"/>', +- etree.tostring(alert.get_recipient(self.xml, "value2")).decode() ++ '<recipient id="rec-1" value="value1"/>', ++ etree.tostring( ++ alert.get_recipient_by_id(self.xml, "rec-1") ++ ).decode() + ) + + def 
test_different_place(self): + assert_raise_library_error( +- lambda: alert.get_recipient(self.xml, "value4"), ++ lambda: alert.get_recipient_by_id(self.xml, "rec-4"), + ( + severities.ERROR, +- report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ report_codes.ID_NOT_FOUND, + { +- "alert": "alert-1", +- "recipient": "value4" ++ "id": "rec-4", ++ "id_description": "Recipient" ++ } ++ ) ++ ) ++ ++ def test_not_in_alert(self): ++ assert_raise_library_error( ++ lambda: alert.get_recipient_by_id(self.xml, "rec-2"), ++ ( ++ severities.ERROR, ++ report_codes.ID_NOT_FOUND, ++ { ++ "id": "rec-2", ++ "id_description": "Recipient" + } + ) + ) + + def test_not_recipient(self): + assert_raise_library_error( +- lambda: alert.get_recipient(self.xml, "value3"), ++ lambda: alert.get_recipient_by_id(self.xml, "rec-3"), + ( + severities.ERROR, +- report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ report_codes.ID_NOT_FOUND, + { +- "alert": "alert-1", +- "recipient": "value3" ++ "id": "rec-3", ++ "id_description": "Recipient" + } + ) + ) + + ++class EnsureRecipientValueIsUniqueTest(TestCase): ++ def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() ++ self.alert = etree.Element("alert", id="alert-1") ++ self.recipient = etree.SubElement( ++ self.alert, "recipient", id="rec-1", value="value1" ++ ) ++ ++ def test_is_unique_no_duplicity_allowed(self): ++ alert.ensure_recipient_value_is_unique( ++ self.mock_reporter, self.alert, "value2" ++ ) ++ self.assertEqual(0, len(self.mock_reporter.report_item_list)) ++ ++ def test_same_recipient_no_duplicity_allowed(self): ++ alert.ensure_recipient_value_is_unique( ++ self.mock_reporter, self.alert, "value1", recipient_id="rec-1" ++ ) ++ self.assertEqual(0, len(self.mock_reporter.report_item_list)) ++ ++ def test_same_recipient_duplicity_allowed(self): ++ alert.ensure_recipient_value_is_unique( ++ self.mock_reporter, self.alert, "value1", recipient_id="rec-1", ++ allow_duplicity=True ++ ) ++ self.assertEqual(0, 
len(self.mock_reporter.report_item_list)) ++ ++ def test_not_unique_no_duplicity_allowed(self): ++ report_item = ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ { ++ "alert": "alert-1", ++ "recipient": "value1" ++ }, ++ report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE ++ ) ++ assert_raise_library_error( ++ lambda: alert.ensure_recipient_value_is_unique( ++ self.mock_reporter, self.alert, "value1" ++ ), ++ report_item ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, [report_item] ++ ) ++ ++ def test_is_unique_duplicity_allowed(self): ++ alert.ensure_recipient_value_is_unique( ++ self.mock_reporter, self.alert, "value2", allow_duplicity=True ++ ) ++ self.assertEqual(0, len(self.mock_reporter.report_item_list)) ++ ++ def test_not_unique_duplicity_allowed(self): ++ alert.ensure_recipient_value_is_unique( ++ self.mock_reporter, self.alert, "value1", allow_duplicity=True ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ severities.WARNING, ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ { ++ "alert": "alert-1", ++ "recipient": "value1" ++ } ++ )] ++ ) ++ ++ + class CreateAlertTest(TestCase): + def setUp(self): + self.tree = etree.XML( +@@ -462,6 +556,7 @@ class RemoveAlertTest(TestCase): + + class AddRecipientTest(TestCase): + def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() + self.tree = etree.XML( + """ + <cib> +@@ -476,11 +571,40 @@ class AddRecipientTest(TestCase): + """ + ) + +- def test_success(self): ++ def test_with_id(self): ++ assert_xml_equal( ++ '<recipient id="my-recipient" value="value1"/>', ++ etree.tostring( ++ alert.add_recipient( ++ self.mock_reporter, self.tree, "alert", "value1", ++ "my-recipient" ++ ) ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient id="my-recipient" value="value1"/> 
++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_without_id(self): + assert_xml_equal( + '<recipient id="alert-recipient-1" value="value1"/>', + etree.tostring( +- alert.add_recipient(self.tree, "alert", "value1") ++ alert.add_recipient( ++ self.mock_reporter, self.tree, "alert", "value1" ++ ) + ).decode() + ) + assert_xml_equal( +@@ -498,23 +622,85 @@ class AddRecipientTest(TestCase): + """, + etree.tostring(self.tree).decode() + ) ++ self.assertEqual([], self.mock_reporter.report_item_list) + +- def test_recipient_exist(self): ++ def test_id_exists(self): + assert_raise_library_error( +- lambda: alert.add_recipient(self.tree, "alert", "test_val"), ++ lambda: alert.add_recipient( ++ self.mock_reporter, self.tree, "alert", "value1", ++ "alert-recipient" ++ ), + ( + severities.ERROR, ++ report_codes.ID_ALREADY_EXISTS, ++ {"id": "alert-recipient"} ++ ) ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_duplicity_of_value_not_allowed(self): ++ report_item = ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ { ++ "alert": "alert", ++ "recipient": "test_val" ++ }, ++ report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE ++ ) ++ assert_raise_library_error( ++ lambda: alert.add_recipient( ++ self.mock_reporter, self.tree, "alert", "test_val" ++ ), ++ report_item ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [report_item] ++ ) ++ ++ def test_duplicity_of_value_allowed(self): ++ assert_xml_equal( ++ '<recipient id="alert-recipient-1" value="test_val"/>', ++ etree.tostring( ++ alert.add_recipient( ++ self.mock_reporter, self.tree, "alert", "test_val", ++ allow_same_value=True ++ ) ++ ).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> 
++ <recipient id="alert-recipient-1" value="test_val"/> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ severities.WARNING, + report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, + { +- "recipient": "test_val", +- "alert": "alert" ++ "alert": "alert", ++ "recipient": "test_val" + } +- ) ++ )] + ) + + def test_alert_not_exist(self): + assert_raise_library_error( +- lambda: alert.add_recipient(self.tree, "alert1", "test_val"), ++ lambda: alert.add_recipient( ++ self.mock_reporter, self.tree, "alert1", "test_val" ++ ), + ( + severities.ERROR, + report_codes.CIB_ALERT_NOT_FOUND, +@@ -532,7 +718,8 @@ class AddRecipientTest(TestCase): + /> + """, + etree.tostring(alert.add_recipient( +- self.tree, "alert", "value1", "desc" ++ self.mock_reporter, self.tree, "alert", "value1", ++ description="desc" + )).decode() + ) + assert_xml_equal( +@@ -554,10 +741,12 @@ class AddRecipientTest(TestCase): + """, + etree.tostring(self.tree).decode() + ) ++ self.assertEqual([], self.mock_reporter.report_item_list) + + + class UpdateRecipientTest(TestCase): + def setUp(self): ++ self.mock_reporter = MockLibraryReportProcessor() + self.tree = etree.XML( + """ + <cib> +@@ -577,6 +766,157 @@ class UpdateRecipientTest(TestCase): + """ + ) + ++ def test_update_value(self): ++ assert_xml_equal( ++ """ ++ <recipient id="alert-recipient" value="new_val"/> ++ """, ++ etree.tostring(alert.update_recipient( ++ self.mock_reporter, self.tree, "alert-recipient", ++ recipient_value="new_val" ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="new_val"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ 
self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_update_same_value_no_duplicity_allowed(self): ++ assert_xml_equal( ++ '<recipient id="alert-recipient" value="test_val"/>', ++ etree.tostring(alert.update_recipient( ++ self.mock_reporter, self.tree, "alert-recipient", ++ recipient_value="test_val" ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_update_same_value_duplicity_allowed(self): ++ assert_xml_equal( ++ '<recipient id="alert-recipient" value="test_val"/>', ++ etree.tostring(alert.update_recipient( ++ self.mock_reporter, self.tree, "alert-recipient", ++ recipient_value="test_val", allow_same_value=True ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="test_val"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ self.assertEqual([], self.mock_reporter.report_item_list) ++ ++ def test_duplicity_of_value_not_allowed(self): ++ report_item = ( ++ severities.ERROR, ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ { ++ "alert": "alert", ++ "recipient": "value1" ++ }, ++ report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE ++ ) ++ assert_raise_library_error( ++ lambda: alert.update_recipient( ++ self.mock_reporter, self.tree, "alert-recipient", "value1" ++ ), ++ report_item ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [report_item] ++ ) ++ ++ def 
test_duplicity_of_value_allowed(self): ++ assert_xml_equal( ++ """ ++ <recipient id="alert-recipient" value="value1"/> ++ """, ++ etree.tostring(alert.update_recipient( ++ self.mock_reporter, self.tree, "alert-recipient", ++ recipient_value="value1", allow_same_value=True ++ )).decode() ++ ) ++ assert_xml_equal( ++ """ ++ <cib> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="/path"> ++ <recipient id="alert-recipient" value="value1"/> ++ <recipient ++ id="alert-recipient-1" ++ value="value1" ++ description="desc" ++ /> ++ </alert> ++ </alerts> ++ </configuration> ++ </cib> ++ """, ++ etree.tostring(self.tree).decode() ++ ) ++ assert_report_item_list_equal( ++ self.mock_reporter.report_item_list, ++ [( ++ severities.WARNING, ++ report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, ++ { ++ "alert": "alert", ++ "recipient": "value1" ++ } ++ )] ++ ) ++ + def test_add_description(self): + assert_xml_equal( + """ +@@ -585,7 +925,8 @@ class UpdateRecipientTest(TestCase): + /> + """, + etree.tostring(alert.update_recipient( +- self.tree, "alert", "test_val", "description" ++ self.mock_reporter, self.tree, "alert-recipient", ++ description="description" + )).decode() + ) + assert_xml_equal( +@@ -611,6 +952,7 @@ class UpdateRecipientTest(TestCase): + """, + etree.tostring(self.tree).decode() + ) ++ self.assertEqual([], self.mock_reporter.report_item_list) + + def test_update_description(self): + assert_xml_equal( +@@ -620,7 +962,8 @@ class UpdateRecipientTest(TestCase): + /> + """, + etree.tostring(alert.update_recipient( +- self.tree, "alert", "value1", "description" ++ self.mock_reporter, self.tree, "alert-recipient-1", ++ description="description" + )).decode() + ) + assert_xml_equal( +@@ -642,6 +985,7 @@ class UpdateRecipientTest(TestCase): + """, + etree.tostring(self.tree).decode() + ) ++ self.assertEqual([], self.mock_reporter.report_item_list) + + def test_remove_description(self): + assert_xml_equal( +@@ -649,7 +993,10 @@ class UpdateRecipientTest(TestCase): + 
<recipient id="alert-recipient-1" value="value1"/> + """, + etree.tostring( +- alert.update_recipient(self.tree, "alert", "value1", "") ++ alert.update_recipient( ++ self.mock_reporter, self.tree, "alert-recipient-1", ++ description="" ++ ) + ).decode() + ) + assert_xml_equal( +@@ -667,26 +1014,18 @@ class UpdateRecipientTest(TestCase): + """, + etree.tostring(self.tree).decode() + ) +- +- def test_alert_not_exists(self): +- assert_raise_library_error( +- lambda: alert.update_recipient(self.tree, "alert1", "test_val", ""), +- ( +- severities.ERROR, +- report_codes.CIB_ALERT_NOT_FOUND, +- {"alert": "alert1"} +- ) +- ) ++ self.assertEqual([], self.mock_reporter.report_item_list) + + def test_recipient_not_exists(self): + assert_raise_library_error( +- lambda: alert.update_recipient(self.tree, "alert", "unknown", ""), ++ lambda: alert.update_recipient( ++ self.mock_reporter, self.tree, "recipient"), + ( + severities.ERROR, +- report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ report_codes.ID_NOT_FOUND, + { +- "alert": "alert", +- "recipient": "unknown" ++ "id": "recipient", ++ "id_description": "Recipient" + } + ) + ) +@@ -710,7 +1049,7 @@ class RemoveRecipientTest(TestCase): + ) + + def test_success(self): +- alert.remove_recipient(self.tree, "alert", "val") ++ alert.remove_recipient(self.tree, "alert-recipient-2") + assert_xml_equal( + """ + <cib> +@@ -726,25 +1065,15 @@ class RemoveRecipientTest(TestCase): + etree.tostring(self.tree).decode() + ) + +- def test_alert_not_exists(self): +- assert_raise_library_error( +- lambda: alert.remove_recipient(self.tree, "alert1", "test_val"), +- ( +- severities.ERROR, +- report_codes.CIB_ALERT_NOT_FOUND, +- {"alert": "alert1"} +- ) +- ) +- + def test_recipient_not_exists(self): + assert_raise_library_error( +- lambda: alert.remove_recipient(self.tree, "alert", "unknown"), ++ lambda: alert.remove_recipient(self.tree, "recipient"), + ( + severities.ERROR, +- report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ report_codes.ID_NOT_FOUND, + 
{ +- "alert": "alert", +- "recipient": "unknown" ++ "id": "recipient", ++ "id_description": "Recipient" + } + ) + ) +diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py +index 7371fbc..432d9d5 100644 +--- a/pcs/lib/commands/alert.py ++++ b/pcs/lib/commands/alert.py +@@ -90,7 +90,9 @@ def add_recipient( + recipient_value, + instance_attribute_dict, + meta_attribute_dict, +- description=None ++ recipient_id=None, ++ description=None, ++ allow_same_value=False + ): + """ + Add new recipient to alert witch id alert_id. +@@ -100,7 +102,9 @@ def add_recipient( + recipient_value -- value of new recipient + instance_attribute_dict -- dictionary of instance attributes to update + meta_attribute_dict -- dictionary of meta attributes to update ++ recipient_id -- id of new recipient, if None it will be generated + description -- recipient description ++ allow_same_value -- if True unique recipient value is not required + """ + if not recipient_value: + raise LibraryError( +@@ -109,7 +113,13 @@ def add_recipient( + + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + recipient = alert.add_recipient( +- cib, alert_id, recipient_value, description ++ lib_env.report_processor, ++ cib, ++ alert_id, ++ recipient_value, ++ recipient_id=recipient_id, ++ description=description, ++ allow_same_value=allow_same_value + ) + alert.update_instance_attributes(cib, recipient, instance_attribute_dict) + alert.update_meta_attributes(cib, recipient, meta_attribute_dict) +@@ -119,26 +129,38 @@ def add_recipient( + + def update_recipient( + lib_env, +- alert_id, +- recipient_value, ++ recipient_id, + instance_attribute_dict, + meta_attribute_dict, +- description=None ++ recipient_value=None, ++ description=None, ++ allow_same_value=False + ): + """ + Update existing recipient. 
+ + lib_env -- LibraryEnvironment +- alert_id -- id of alert to which recipient belong +- recipient_value -- recipient to be updated ++ recipient_id -- id of recipient to be updated + instance_attribute_dict -- dictionary of instance attributes to update + meta_attribute_dict -- dictionary of meta attributes to update ++ recipient_value -- new recipient value, if None old value will stay ++ unchanged + description -- new description, if empty string, old description will be + deleted, if None old value will stay unchanged ++ allow_same_value -- if True unique recipient value is not required + """ ++ if not recipient_value and recipient_value is not None: ++ raise LibraryError( ++ reports.cib_alert_recipient_invalid_value(recipient_value) ++ ) + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) + recipient = alert.update_recipient( +- cib, alert_id, recipient_value, description ++ lib_env.report_processor, ++ cib, ++ recipient_id, ++ recipient_value=recipient_value, ++ description=description, ++ allow_same_value=allow_same_value + ) + alert.update_instance_attributes(cib, recipient, instance_attribute_dict) + alert.update_meta_attributes(cib, recipient, meta_attribute_dict) +@@ -146,16 +168,15 @@ def update_recipient( + lib_env.push_cib(cib) + + +-def remove_recipient(lib_env, alert_id, recipient_value): ++def remove_recipient(lib_env, recipient_id): + """ + Remove existing recipient. 
+ + lib_env -- LibraryEnvironment +- alert_id -- id of alert to which recipient belong +- recipient_value -- recipient to be removed ++ recipient_id -- if of recipient to be removed + """ + cib = lib_env.get_cib(REQUIRED_CIB_VERSION) +- alert.remove_recipient(cib, alert_id, recipient_value) ++ alert.remove_recipient(cib, recipient_id) + lib_env.push_cib(cib) + + +diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py +index 34813df..bced45e 100644 +--- a/pcs/lib/commands/test/test_alert.py ++++ b/pcs/lib/commands/test/test_alert.py +@@ -361,19 +361,17 @@ class AddRecipientTest(TestCase): + def test_recipient_already_exists(self): + assert_raise_library_error( + lambda: cmd_alert.add_recipient( +- self.mock_env, "alert", "value1", {}, {} ++ self.mock_env, "alert", "value1", {}, {}, ++ recipient_id="alert-recipient" + ), + ( + Severities.ERROR, +- report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, +- { +- "recipient": "value1", +- "alert": "alert" +- } ++ report_codes.ID_ALREADY_EXISTS, ++ {"id": "alert-recipient"} + ) + ) + +- def test_success(self): ++ def test_without_id(self): + cmd_alert.add_recipient( + self.mock_env, + "alert", +@@ -424,6 +422,58 @@ class AddRecipientTest(TestCase): + self.mock_env._get_cib_xml() + ) + ++ def test_with_id(self): ++ cmd_alert.add_recipient( ++ self.mock_env, ++ "alert", ++ "value", ++ {"attr1": "val1"}, ++ { ++ "attr2": "val2", ++ "attr1": "val1" ++ }, ++ recipient_id="my-recipient" ++ ) ++ assert_xml_equal( ++ """ ++<cib validate-with="pacemaker-2.5"> ++ <configuration> ++ <alerts> ++ <alert id="alert" path="path"> ++ <recipient id="alert-recipient" value="value1"/> ++ <recipient id="my-recipient" value="value"> ++ <meta_attributes ++ id="my-recipient-meta_attributes" ++ > ++ <nvpair ++ id="my-recipient-meta_attributes-attr1" ++ name="attr1" ++ value="val1" ++ /> ++ <nvpair ++ id="my-recipient-meta_attributes-attr2" ++ name="attr2" ++ value="val2" ++ /> ++ </meta_attributes> ++ 
<instance_attributes ++ id="my-recipient-instance_attributes" ++ > ++ <nvpair ++ id="my-recipient-instance_attributes-attr1" ++ name="attr1" ++ value="val1" ++ /> ++ </instance_attributes> ++ </recipient> ++ </alert> ++ </alerts> ++ </configuration> ++</cib> ++ """, ++ self.mock_env._get_cib_xml() ++ ) ++ + + class UpdateRecipientTest(TestCase): + def setUp(self): +@@ -470,29 +520,29 @@ class UpdateRecipientTest(TestCase): + self.mock_log, self.mock_rep, cib_data=cib + ) + +- def test_alert_not_found(self): ++ def test_empty_value(self): + assert_raise_library_error( + lambda: cmd_alert.update_recipient( +- self.mock_env, "unknown", "recipient", {}, {} ++ self.mock_env, "alert-recipient-1", {}, {}, recipient_value="" + ), + ( + Severities.ERROR, +- report_codes.CIB_ALERT_NOT_FOUND, +- {"alert": "unknown"} ++ report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID, ++ {"recipient": ""} + ) + ) + + def test_recipient_not_found(self): + assert_raise_library_error( + lambda: cmd_alert.update_recipient( +- self.mock_env, "alert", "recipient", {}, {} ++ self.mock_env, "recipient", {}, {} + ), + ( + Severities.ERROR, +- report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, ++ report_codes.ID_NOT_FOUND, + { +- "recipient": "recipient", +- "alert": "alert" ++ "id": "recipient", ++ "id_description": "Recipient" + } + ) + ) +@@ -500,14 +550,14 @@ class UpdateRecipientTest(TestCase): + def test_update_all(self): + cmd_alert.update_recipient( + self.mock_env, +- "alert", +- "value", ++ "alert-recipient-1", + {"attr1": "value"}, + { + "attr1": "", + "attr3": "new_val" + }, +- "desc" ++ recipient_value="new_val", ++ description="desc" + ) + assert_xml_equal( + """ +@@ -518,7 +568,7 @@ class UpdateRecipientTest(TestCase): + <recipient id="alert-recipient" value="value1"/> + <recipient + id="alert-recipient-1" +- value="value" ++ value="new_val" + description="desc" + > + <meta_attributes +@@ -575,35 +625,20 @@ class RemoveRecipientTest(TestCase): + self.mock_log, self.mock_rep, cib_data=cib + ) + +- 
def test_alert_not_found(self): +- assert_raise_library_error( +- lambda: cmd_alert.remove_recipient( +- self.mock_env, "unknown", "recipient" +- ), +- ( +- Severities.ERROR, +- report_codes.CIB_ALERT_NOT_FOUND, +- {"alert": "unknown"} +- ) +- ) +- + def test_recipient_not_found(self): + assert_raise_library_error( + lambda: cmd_alert.remove_recipient( +- self.mock_env, "alert", "recipient" ++ self.mock_env, "recipient" + ), + ( + Severities.ERROR, +- report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, +- { +- "recipient": "recipient", +- "alert": "alert" +- } ++ report_codes.ID_NOT_FOUND, ++ {"id": "recipient"} + ) + ) + + def test_success(self): +- cmd_alert.remove_recipient(self.mock_env, "alert", "value1") ++ cmd_alert.remove_recipient(self.mock_env, "alert-recipient") + assert_xml_equal( + """ + <cib validate-with="pacemaker-2.5"> +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index 9ececf9..fc2670b 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -1654,40 +1654,39 @@ def cluster_restart_required_to_apply_changes(): + ) + + +-def cib_alert_recipient_already_exists(alert_id, recipient_value): ++def cib_alert_recipient_already_exists( ++ alert_id, recipient_value, severity=ReportItemSeverity.ERROR, forceable=None ++): + """ +- Error that recipient already exists. ++ Recipient with specified value already exists in alert with id 'alert_id' + + alert_id -- id of alert to which recipient belongs + recipient_value -- value of recipient + """ +- return ReportItem.error( ++ return ReportItem( + report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS, +- "Recipient '{recipient}' in alert '{alert}' already exists.", ++ severity, ++ "Recipient '{recipient}' in alert '{alert}' already exists", + info={ + "recipient": recipient_value, + "alert": alert_id +- } ++ }, ++ forceable=forceable + ) + + +-def cib_alert_recipient_not_found(alert_id, recipient_value): ++def cib_alert_recipient_invalid_value(recipient_value): + """ +- Specified recipient not found. 
++ Invalid recipient value. + +- alert_id -- id of alert to which recipient should belong + recipient_value -- recipient value + """ + return ReportItem.error( +- report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND, +- "Recipient '{recipient}' not found in alert '{alert}'.", +- info={ +- "recipient": recipient_value, +- "alert": alert_id +- } ++ report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID, ++ "Recipient value '{recipient}' is not valid.", ++ info={"recipient": recipient_value} + ) + +- + def cib_alert_not_found(alert_id): + """ + Alert with specified id doesn't exist. +-- +1.8.3.1 + + +From b8155a2bfa79bb71429953eb756e393c18926e4c Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Sat, 9 Jul 2016 13:54:00 +0200 +Subject: [PATCH 2/2] cli: use recipient id as identifier instead of its value + +--- + pcs/alert.py | 32 +++++----- + pcs/pcs.8 | 8 +-- + pcs/test/test_alert.py | 165 ++++++++++++++++++++++++++++++++++++++++--------- + pcs/usage.py | 14 +++-- + 4 files changed, 164 insertions(+), 55 deletions(-) + +diff --git a/pcs/alert.py b/pcs/alert.py +index d3a6e28..4786f57 100644 +--- a/pcs/alert.py ++++ b/pcs/alert.py +@@ -139,42 +139,44 @@ def recipient_add(lib, argv, modifiers): + + sections = parse_cmd_sections(argv[2:], ["options", "meta"]) + main_args = prepare_options(sections["main"]) +- ensure_only_allowed_options(main_args, ["description"]) ++ ensure_only_allowed_options(main_args, ["description", "id"]) + + lib.alert.add_recipient( + alert_id, + recipient_value, + prepare_options(sections["options"]), + prepare_options(sections["meta"]), +- main_args.get("description", None) ++ recipient_id=main_args.get("id", None), ++ description=main_args.get("description", None), ++ allow_same_value=modifiers["force"] + ) + + + def recipient_update(lib, argv, modifiers): +- if len(argv) < 2: ++ if len(argv) < 1: + raise CmdLineInputError() + +- alert_id = argv[0] +- recipient_value = argv[1] ++ recipient_id = argv[0] + +- sections = 
parse_cmd_sections(argv[2:], ["options", "meta"]) ++ sections = parse_cmd_sections(argv[1:], ["options", "meta"]) + main_args = prepare_options(sections["main"]) +- ensure_only_allowed_options(main_args, ["description"]) ++ ensure_only_allowed_options(main_args, ["description", "value"]) + + lib.alert.update_recipient( +- alert_id, +- recipient_value, ++ recipient_id, + prepare_options(sections["options"]), + prepare_options(sections["meta"]), +- main_args.get("description", None) ++ recipient_value=main_args.get("value", None), ++ description=main_args.get("description", None), ++ allow_same_value=modifiers["force"] + ) + + + def recipient_remove(lib, argv, modifiers): +- if len(argv) != 2: ++ if len(argv) != 1: + raise CmdLineInputError() + +- lib.alert.remove_recipient(argv[0], argv[1]) ++ lib.alert.remove_recipient(argv[0]) + + + def _nvset_to_str(nvset_obj): +@@ -219,9 +221,9 @@ def _alert_to_str(alert): + + + def _recipient_to_str(recipient): +- return ["Recipient: {value}".format(value=recipient["value"])] + indent( +- __description_attributes_to_str(recipient), 1 +- ) ++ return ["Recipient: {id} (value={value})".format( ++ value=recipient["value"], id=recipient["id"] ++ )] + indent(__description_attributes_to_str(recipient), 1) + + + def print_alert_config(lib, argv, modifiers): +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 4426444..223ef1b 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -666,13 +666,13 @@ Update existing alert with specified id. + remove <alert\-id> + Remove alert with specified id. + .TP +-recipient add <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...] ++recipient add <alert\-id> <recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] + Add new recipient to specified alert. + .TP +-recipient update <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] 
[meta [<meta\-option>=<value>]...] +-Update existing recipient identified by alert and it's value. ++recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] ++Update existing recipient identified by it's id. + .TP +-recipient remove <alert\-id> <recipient\-value> ++recipient remove <recipient\-id> + Remove specified recipient. + .SH EXAMPLES + .TP +diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py +index 905dc9f..bb61600 100644 +--- a/pcs/test/test_alert.py ++++ b/pcs/test/test_alert.py +@@ -246,12 +246,12 @@ Alerts: + Alerts: + Alert: alert (path=test) + Recipients: +- Recipient: rec_value ++ Recipient: alert-recipient (value=rec_value) + """ + ) + self.assert_pcs_success( +- "alert recipient add alert rec_value2 description=description " +- "options o1=1 o2=2 meta m1=v1 m2=v2" ++ "alert recipient add alert rec_value2 id=my-recipient " ++ "description=description options o1=1 o2=2 meta m1=v1 m2=v2" + ) + self.assert_pcs_success( + "alert config", +@@ -259,26 +259,56 @@ Alerts: + Alerts: + Alert: alert (path=test) + Recipients: +- Recipient: rec_value +- Recipient: rec_value2 ++ Recipient: alert-recipient (value=rec_value) ++ Recipient: my-recipient (value=rec_value2) + Description: description + Options: o1=1 o2=2 + Meta options: m1=v1 m2=v2 + """ + ) + +- def test_no_alert(self): ++ def test_already_exists(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert recipient add alert rec_value id=rec") + self.assert_pcs_fail( +- "alert recipient add alert rec_value", +- "Error: Alert 'alert' not found.\n" ++ "alert recipient add alert value id=rec", ++ "Error: 'rec' already exists\n" ++ ) ++ self.assert_pcs_fail( ++ "alert recipient add alert value id=alert", ++ "Error: 'alert' already exists\n" + ) + +- def test_already_exists(self): ++ def test_same_value(self): + self.assert_pcs_success("alert create path=test") +- 
self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success("alert recipient add alert rec_value id=rec") + self.assert_pcs_fail( + "alert recipient add alert rec_value", +- "Error: Recipient 'rec_value' in alert 'alert' already exists.\n" ++ "Error: Recipient 'rec_value' in alert 'alert' already exists, " ++ "use --force to override\n" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: rec (value=rec_value) ++""" ++ ) ++ self.assert_pcs_success( ++ "alert recipient add alert rec_value --force", ++ "Warning: Recipient 'rec_value' in alert 'alert' already exists\n" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: rec (value=rec_value) ++ Recipient: alert-recipient (value=rec_value) ++""" + ) + + +@@ -296,14 +326,14 @@ class UpdateRecipientAlert(PcsAlertTest): + Alerts: + Alert: alert (path=test) + Recipients: +- Recipient: rec_value ++ Recipient: alert-recipient (value=rec_value) + Description: description + Options: o1=1 o2=2 + Meta options: m1=v1 m2=v2 + """ + ) + self.assert_pcs_success( +- "alert recipient update alert rec_value description=desc " ++ "alert recipient update alert-recipient value=new description=desc " + "options o1= o2=v2 o3=3 meta m1= m2=2 m3=3" + ) + self.assert_pcs_success( +@@ -312,24 +342,99 @@ Alerts: + Alerts: + Alert: alert (path=test) + Recipients: +- Recipient: rec_value ++ Recipient: alert-recipient (value=new) ++ Description: desc ++ Options: o2=v2 o3=3 ++ Meta options: m2=2 m3=3 ++""" ++ ) ++ self.assert_pcs_success( ++ "alert recipient update alert-recipient value=new" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: alert-recipient (value=new) + Description: desc + Options: o2=v2 o3=3 + Meta options: m2=2 m3=3 + """ + ) + +- def test_no_alert(self): ++ def 
test_value_exists(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success("alert recipient add alert value") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: alert-recipient (value=rec_value) ++ Recipient: alert-recipient-1 (value=value) ++""" ++ ) + self.assert_pcs_fail( +- "alert recipient update alert rec_value description=desc", +- "Error: Alert 'alert' not found.\n" ++ "alert recipient update alert-recipient value=value", ++ "Error: Recipient 'value' in alert 'alert' already exists, " ++ "use --force to override\n" ++ ) ++ self.assert_pcs_success( ++ "alert recipient update alert-recipient value=value --force", ++ "Warning: Recipient 'value' in alert 'alert' already exists\n" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: alert-recipient (value=value) ++ Recipient: alert-recipient-1 (value=value) ++""" ++ ) ++ ++ def test_value_same_as_previous(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: alert-recipient (value=rec_value) ++""" ++ ) ++ self.assert_pcs_success( ++ "alert recipient update alert-recipient value=rec_value" ++ ) ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++ Recipients: ++ Recipient: alert-recipient (value=rec_value) ++""" + ) + + def test_no_recipient(self): ++ self.assert_pcs_fail( ++ "alert recipient update rec description=desc", ++ "Error: Recipient 'rec' does not exist\n" ++ ) ++ ++ def test_empty_value(self): + self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_success("alert recipient add alert rec_value id=rec") + 
self.assert_pcs_fail( +- "alert recipient update alert rec_value description=desc", +- "Error: Recipient 'rec_value' not found in alert 'alert'.\n" ++ "alert recipient update rec value=", ++ "Error: Recipient value '' is not valid.\n" + ) + + +@@ -337,27 +442,27 @@ Alerts: + class RemoveRecipientTest(PcsAlertTest): + def test_success(self): + self.assert_pcs_success("alert create path=test") +- self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success("alert recipient add alert rec_value id=rec") + self.assert_pcs_success( + "alert config", + """\ + Alerts: + Alert: alert (path=test) + Recipients: +- Recipient: rec_value ++ Recipient: rec (value=rec_value) + """ + ) +- self.assert_pcs_success("alert recipient remove alert rec_value") +- +- def test_no_alert(self): +- self.assert_pcs_fail( +- "alert recipient remove alert rec_value", +- "Error: Alert 'alert' not found.\n" ++ self.assert_pcs_success("alert recipient remove rec") ++ self.assert_pcs_success( ++ "alert config", ++ """\ ++Alerts: ++ Alert: alert (path=test) ++""" + ) + + def test_no_recipient(self): +- self.assert_pcs_success("alert create path=test") + self.assert_pcs_fail( +- "alert recipient remove alert rec_value", +- "Error: Recipient 'rec_value' not found in alert 'alert'.\n" ++ "alert recipient remove rec", ++ "Error: Recipient 'rec' does not exist\n" + ) +diff --git a/pcs/usage.py b/pcs/usage.py +index ee53a2f..77b496e 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1402,15 +1402,17 @@ Commands: + remove <alert-id> + Remove alert with specified id. + +- recipient add <alert-id> <recipient-value> [description=<description>] +- [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] ++ recipient add <alert-id> <recipient-value> [id=<recipient-id>] ++ [description=<description>] [options [<option>=<value>]...] ++ [meta [<meta-option>=<value>]...] + Add new recipient to specified alert. 
+ +- recipient update <alert-id> <recipient-value> [description=<description>] +- [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] +- Update existing recipient identified by alert and it's value. ++ recipient update <recipient-id> [value=<recipient-value>] ++ [description=<description>] [options [<option>=<value>]...] ++ [meta [<meta-option>=<value>]...] ++ Update existing recipient identified by it's id. + +- recipient remove <alert-id> <recipient-value> ++ recipient remove <recipient-id> + Remove specified recipient. + """ + if pout: +-- +1.8.3.1 + diff --git a/SOURCES/bz1315371-03-improve-alerts-help.patch b/SOURCES/bz1315371-03-improve-alerts-help.patch new file mode 100644 index 0000000..a26e2b9 --- /dev/null +++ b/SOURCES/bz1315371-03-improve-alerts-help.patch @@ -0,0 +1,67 @@ +From a315196a5f9fc70ce1cd4b56648f262048bb93f1 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Mon, 25 Jul 2016 14:10:55 +0200 +Subject: [PATCH] improve alerts help + +--- + pcs/pcs.8 | 8 ++++---- + pcs/usage.py | 8 ++++---- + 2 files changed, 8 insertions(+), 8 deletions(-) + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index f789df7..0e8e967 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -667,16 +667,16 @@ Add specified utilization options to specified node. If node is not specified, + Show all configured alerts. + .TP + create path=<path> [id=<alert\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...] +-Create new alert with specified path. Id will be automatically generated if it is not specified. ++Define an alert handler with specified path. Id will be automatically generated if it is not specified. + .TP + update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...] +-Update existing alert with specified id. ++Update existing alert handler with specified id. + .TP + remove <alert\-id> +-Remove alert with specified id. 
++Remove alert handler with specified id. + .TP + recipient add <alert\-id> <recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] +-Add new recipient to specified alert. ++Add new recipient to specified alert handler. + .TP + recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] + Update existing recipient identified by it's id. +diff --git a/pcs/usage.py b/pcs/usage.py +index 2f8f855..7cfb33e 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1416,20 +1416,20 @@ Commands: + + create path=<path> [id=<alert-id>] [description=<description>] + [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] +- Create new alert with specified path. Id will be automatically ++ Define an alert handler with specified path. Id will be automatically + generated if it is not specified. + + update <alert-id> [path=<path>] [description=<description>] + [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] +- Update existing alert with specified id. ++ Update existing alert handler with specified id. + + remove <alert-id> +- Remove alert with specified id. ++ Remove alert handler with specified id. + + recipient add <alert-id> <recipient-value> [id=<recipient-id>] + [description=<description>] [options [<option>=<value>]...] + [meta [<meta-option>=<value>]...] +- Add new recipient to specified alert. ++ Add new recipient to specified alert handler. + + recipient update <recipient-id> [value=<recipient-value>] + [description=<description>] [options [<option>=<value>]...] 
+-- +1.8.3.1 + diff --git a/SOURCES/bz1315371-04-alerts-related-fixes.patch b/SOURCES/bz1315371-04-alerts-related-fixes.patch new file mode 100644 index 0000000..9c8e1ec --- /dev/null +++ b/SOURCES/bz1315371-04-alerts-related-fixes.patch @@ -0,0 +1,373 @@ +From b438fe5c0eb4e6fa738e21287540c0d8f6b91c68 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Fri, 19 Aug 2016 02:57:39 +0200 +Subject: [PATCH] squash bz1315371 [RFE] Provide configurable alerts + +25a25c534ff6 show help when unknown subcommand of 'pcs alert recipient' was given + +c352ce184093 make syntax of command 'pcs alert recipient add' more consistent + +8c6ec586d57c fix error handling when upgrading cib schema +--- + pcs/alert.py | 9 +++-- + pcs/lib/cib/tools.py | 36 +++++++++-------- + pcs/pcs.8 | 2 +- + pcs/test/test_alert.py | 44 +++++++++++++------- + pcs/test/test_lib_cib_tools.py | 91 ++++++++++++++++++++++++++++++++++++++++++ + pcs/usage.py | 2 +- + 6 files changed, 147 insertions(+), 37 deletions(-) + +diff --git a/pcs/alert.py b/pcs/alert.py +index 693bb8d..17f4e8d 100644 +--- a/pcs/alert.py ++++ b/pcs/alert.py +@@ -63,6 +63,8 @@ def recipient_cmd(*args): + recipient_update(*args) + elif sub_cmd == "remove": + recipient_remove(*args) ++ else: ++ raise CmdLineInputError() + except CmdLineInputError as e: + utils.exit_on_cmdline_input_errror( + e, "alert", "recipient {0}".format(sub_cmd) +@@ -127,15 +129,14 @@ def recipient_add(lib, argv, modifiers): + raise CmdLineInputError() + + alert_id = argv[0] +- recipient_value = argv[1] + +- sections = parse_cmd_sections(argv[2:], set(["options", "meta"])) ++ sections = parse_cmd_sections(argv[1:], set(["options", "meta"])) + main_args = prepare_options(sections["main"]) +- ensure_only_allowed_options(main_args, ["description", "id"]) ++ ensure_only_allowed_options(main_args, ["description", "id", "value"]) + + lib.alert.add_recipient( + alert_id, +- recipient_value, ++ main_args.get("value", None), + 
prepare_options(sections["options"]), + prepare_options(sections["meta"]), + recipient_id=main_args.get("id", None), +diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py +index d8ce57a..8141360 100644 +--- a/pcs/lib/cib/tools.py ++++ b/pcs/lib/cib/tools.py +@@ -176,29 +176,31 @@ def upgrade_cib(cib, runner): + cib -- cib etree + runner -- CommandRunner + """ +- temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs") +- temp_file.write(etree.tostring(cib).decode()) +- temp_file.flush() +- output, retval = runner.run( +- [ +- os.path.join(settings.pacemaker_binaries, "cibadmin"), +- "--upgrade", +- "--force" +- ], +- env_extend={"CIB_file": temp_file.name} +- ) ++ temp_file = None ++ try: ++ temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs") ++ temp_file.write(etree.tostring(cib).decode()) ++ temp_file.flush() ++ output, retval = runner.run( ++ [ ++ os.path.join(settings.pacemaker_binaries, "cibadmin"), ++ "--upgrade", ++ "--force" ++ ], ++ env_extend={"CIB_file": temp_file.name} ++ ) + +- if retval != 0: +- temp_file.close() +- LibraryError(reports.cib_upgrade_failed(output)) ++ if retval != 0: ++ temp_file.close() ++ raise LibraryError(reports.cib_upgrade_failed(output)) + +- try: + temp_file.seek(0) + return etree.fromstring(temp_file.read()) + except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e: +- LibraryError(reports.cib_upgrade_failed(str(e))) ++ raise LibraryError(reports.cib_upgrade_failed(str(e))) + finally: +- temp_file.close() ++ if temp_file: ++ temp_file.close() + + + def ensure_cib_version(runner, cib, version): +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 7a054ca..b3c4877 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -727,7 +727,7 @@ Update existing alert handler with specified id. + remove <alert\-id> + Remove alert handler with specified id. + .TP +-recipient add <alert\-id> <recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] 
++recipient add <alert\-id> value=<recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] + Add new recipient to specified alert handler. + .TP + recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...] +diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py +index f6ea70d..d919ff6 100644 +--- a/pcs/test/test_alert.py ++++ b/pcs/test/test_alert.py +@@ -233,7 +233,7 @@ Alerts: + Alert: alert (path=test) + """ + ) +- self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success("alert recipient add alert value=rec_value") + self.assert_pcs_success( + "alert config", + """\ +@@ -244,7 +244,7 @@ Alerts: + """ + ) + self.assert_pcs_success( +- "alert recipient add alert rec_value2 id=my-recipient " ++ "alert recipient add alert value=rec_value2 id=my-recipient " + "description=description options o1=1 o2=2 meta m1=v1 m2=v2" + ) + self.assert_pcs_success( +@@ -263,21 +263,25 @@ Alerts: + + def test_already_exists(self): + self.assert_pcs_success("alert create path=test") +- self.assert_pcs_success("alert recipient add alert rec_value id=rec") ++ self.assert_pcs_success( ++ "alert recipient add alert value=rec_value id=rec" ++ ) + self.assert_pcs_fail( +- "alert recipient add alert value id=rec", ++ "alert recipient add alert value=value id=rec", + "Error: 'rec' already exists\n" + ) + self.assert_pcs_fail( +- "alert recipient add alert value id=alert", ++ "alert recipient add alert value=value id=alert", + "Error: 'alert' already exists\n" + ) + + def test_same_value(self): + self.assert_pcs_success("alert create path=test") +- self.assert_pcs_success("alert recipient add alert rec_value id=rec") ++ self.assert_pcs_success( ++ "alert recipient add alert value=rec_value id=rec" ++ ) + self.assert_pcs_fail( +- "alert recipient add alert rec_value", ++ "alert recipient 
add alert value=rec_value", + "Error: Recipient 'rec_value' in alert 'alert' already exists, " + "use --force to override\n" + ) +@@ -291,7 +295,7 @@ Alerts: + """ + ) + self.assert_pcs_success( +- "alert recipient add alert rec_value --force", ++ "alert recipient add alert value=rec_value --force", + "Warning: Recipient 'rec_value' in alert 'alert' already exists\n" + ) + self.assert_pcs_success( +@@ -305,13 +309,21 @@ Alerts: + """ + ) + ++ def test_no_value(self): ++ self.assert_pcs_success("alert create path=test") ++ self.assert_pcs_fail( ++ "alert recipient add alert id=rec", ++ "Error: required option 'value' is missing\n" ++ ) ++ ++ + + @unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG) + class UpdateRecipientAlert(PcsAlertTest): + def test_success(self): + self.assert_pcs_success("alert create path=test") + self.assert_pcs_success( +- "alert recipient add alert rec_value description=description " ++ "alert recipient add alert value=rec_value description=description " + "options o1=1 o2=2 meta m1=v1 m2=v2" + ) + self.assert_pcs_success( +@@ -360,8 +372,8 @@ Alerts: + + def test_value_exists(self): + self.assert_pcs_success("alert create path=test") +- self.assert_pcs_success("alert recipient add alert rec_value") +- self.assert_pcs_success("alert recipient add alert value") ++ self.assert_pcs_success("alert recipient add alert value=rec_value") ++ self.assert_pcs_success("alert recipient add alert value=value") + self.assert_pcs_success( + "alert config", + """\ +@@ -394,7 +406,7 @@ Alerts: + + def test_value_same_as_previous(self): + self.assert_pcs_success("alert create path=test") +- self.assert_pcs_success("alert recipient add alert rec_value") ++ self.assert_pcs_success("alert recipient add alert value=rec_value") + self.assert_pcs_success( + "alert config", + """\ +@@ -425,7 +437,9 @@ Alerts: + + def test_empty_value(self): + self.assert_pcs_success("alert create path=test") +- self.assert_pcs_success("alert recipient add alert rec_value 
id=rec") ++ self.assert_pcs_success( ++ "alert recipient add alert value=rec_value id=rec" ++ ) + self.assert_pcs_fail( + "alert recipient update rec value=", + "Error: Recipient value '' is not valid.\n" +@@ -436,7 +450,9 @@ Alerts: + class RemoveRecipientTest(PcsAlertTest): + def test_success(self): + self.assert_pcs_success("alert create path=test") +- self.assert_pcs_success("alert recipient add alert rec_value id=rec") ++ self.assert_pcs_success( ++ "alert recipient add alert value=rec_value id=rec" ++ ) + self.assert_pcs_success( + "alert config", + """\ +diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py +index 10f8a96..0fd4d22 100644 +--- a/pcs/test/test_lib_cib_tools.py ++++ b/pcs/test/test_lib_cib_tools.py +@@ -7,6 +7,7 @@ from __future__ import ( + + from unittest import TestCase + ++from os.path import join + from lxml import etree + + from pcs.test.tools.assertions import ( +@@ -17,6 +18,7 @@ from pcs.test.tools.misc import get_test_resource as rc + from pcs.test.tools.pcs_mock import mock + from pcs.test.tools.xml import get_xml_manipulation_creator_from_file + ++from pcs import settings + from pcs.common import report_codes + from pcs.lib.external import CommandRunner + from pcs.lib.errors import ReportItemSeverity as severities +@@ -369,3 +371,92 @@ class EnsureCibVersionTest(TestCase): + ) + ) + mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner) ++ ++ ++@mock.patch("tempfile.NamedTemporaryFile") ++class UpgradeCibTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ ++ def test_success(self, mock_named_file): ++ mock_file = mock.MagicMock() ++ mock_file.name = "mock_file_name" ++ mock_file.read.return_value = "<cib/>" ++ mock_named_file.return_value = mock_file ++ self.mock_runner.run.return_value = ("", 0) ++ assert_xml_equal( ++ "<cib/>", ++ etree.tostring( ++ lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner) ++ ).decode() ++ ) ++ 
mock_named_file.assert_called_once_with("w+", suffix=".pcs") ++ mock_file.write.assert_called_once_with("<old_cib/>") ++ mock_file.flush.assert_called_once_with() ++ self.mock_runner.run.assert_called_once_with( ++ [ ++ join(settings.pacemaker_binaries, "cibadmin"), ++ "--upgrade", ++ "--force" ++ ], ++ env_extend={"CIB_file": "mock_file_name"} ++ ) ++ mock_file.seek.assert_called_once_with(0) ++ mock_file.read.assert_called_once_with() ++ ++ def test_upgrade_failed(self, mock_named_file): ++ mock_file = mock.MagicMock() ++ mock_file.name = "mock_file_name" ++ mock_named_file.return_value = mock_file ++ self.mock_runner.run.return_value = ("reason", 1) ++ assert_raise_library_error( ++ lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner), ++ ( ++ severities.ERROR, ++ report_codes.CIB_UPGRADE_FAILED, ++ {"reason": "reason"} ++ ) ++ ) ++ mock_named_file.assert_called_once_with("w+", suffix=".pcs") ++ mock_file.write.assert_called_once_with("<old_cib/>") ++ mock_file.flush.assert_called_once_with() ++ self.mock_runner.run.assert_called_once_with( ++ [ ++ join(settings.pacemaker_binaries, "cibadmin"), ++ "--upgrade", ++ "--force" ++ ], ++ env_extend={"CIB_file": "mock_file_name"} ++ ) ++ ++ def test_unable_to_parse_upgraded_cib(self, mock_named_file): ++ mock_file = mock.MagicMock() ++ mock_file.name = "mock_file_name" ++ mock_file.read.return_value = "not xml" ++ mock_named_file.return_value = mock_file ++ self.mock_runner.run.return_value = ("", 0) ++ assert_raise_library_error( ++ lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner), ++ ( ++ severities.ERROR, ++ report_codes.CIB_UPGRADE_FAILED, ++ { ++ "reason": ++ "Start tag expected, '<' not found, line 1, column 1", ++ } ++ ) ++ ) ++ mock_named_file.assert_called_once_with("w+", suffix=".pcs") ++ mock_file.write.assert_called_once_with("<old_cib/>") ++ mock_file.flush.assert_called_once_with() ++ self.mock_runner.run.assert_called_once_with( ++ [ ++ join(settings.pacemaker_binaries, 
"cibadmin"), ++ "--upgrade", ++ "--force" ++ ], ++ env_extend={"CIB_file": "mock_file_name"} ++ ) ++ mock_file.seek.assert_called_once_with(0) ++ mock_file.read.assert_called_once_with() ++ +diff --git a/pcs/usage.py b/pcs/usage.py +index 9ebbca9..78e340b 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1507,7 +1507,7 @@ Commands: + remove <alert-id> + Remove alert handler with specified id. + +- recipient add <alert-id> <recipient-value> [id=<recipient-id>] ++ recipient add <alert-id> value=<recipient-value> [id=<recipient-id>] + [description=<description>] [options [<option>=<value>]...] + [meta [<meta-option>=<value>]...] + Add new recipient to specified alert handler. +-- +1.8.3.1 + diff --git a/SOURCES/bz1327739-01-add-pcs-quorum-expected-votes-command.patch b/SOURCES/bz1327739-01-add-pcs-quorum-expected-votes-command.patch new file mode 100644 index 0000000..ce9052f --- /dev/null +++ b/SOURCES/bz1327739-01-add-pcs-quorum-expected-votes-command.patch @@ -0,0 +1,311 @@ +From bd852905ad905b83daa1a7240e7a79c3357db5b8 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Thu, 30 Jun 2016 15:09:31 +0200 +Subject: [PATCH] bz1158805-01 add "pcs quorum expected-votes" command + +--- + pcs/cli/common/lib_wrapper.py | 1 + + pcs/common/report_codes.py | 1 + + pcs/lib/commands/quorum.py | 21 ++++++++++++++++++ + pcs/lib/corosync/live.py | 15 +++++++++++++ + pcs/lib/reports.py | 13 +++++++++++ + pcs/pcs.8 | 3 +++ + pcs/quorum.py | 7 ++++++ + pcs/test/suite.py | 6 +++-- + pcs/test/test_lib_commands_quorum.py | 43 ++++++++++++++++++++++++++++++++++++ + pcs/test/test_lib_corosync_live.py | 42 +++++++++++++++++++++++++++++++++++ + pcs/usage.py | 4 ++++ + 11 files changed, 154 insertions(+), 2 deletions(-) + +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 2dd5810..c4b8342 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -116,6 +116,7 @@ def load_module(env, middleware_factory, name): 
+ "add_device": quorum.add_device, + "get_config": quorum.get_config, + "remove_device": quorum.remove_device, ++ "set_expected_votes_live": quorum.set_expected_votes_live, + "set_options": quorum.set_options, + "status": quorum.status_text, + "status_device": quorum.status_device_text, +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index afe0554..2b39938 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -47,6 +47,7 @@ COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR" + COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE" + COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE" + COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR" ++COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR" + COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE" + CRM_MON_ERROR = "CRM_MON_ERROR" + DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" +diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py +index aa00bbd..7425e78 100644 +--- a/pcs/lib/commands/quorum.py ++++ b/pcs/lib/commands/quorum.py +@@ -314,6 +314,27 @@ def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes): + skip_offline_nodes + ) + ++def set_expected_votes_live(lib_env, expected_votes): ++ """ ++ set expected votes in live cluster to specified value ++ numeric expected_votes desired value of expected votes ++ """ ++ if lib_env.is_cman_cluster: ++ raise LibraryError(reports.cman_unsupported_command()) ++ ++ try: ++ votes_int = int(expected_votes) ++ if votes_int < 1: ++ raise ValueError() ++ except ValueError: ++ raise LibraryError(reports.invalid_option_value( ++ "expected votes", ++ expected_votes, ++ "positive integer" ++ )) ++ ++ corosync_live.set_expected_votes(lib_env.cmd_runner(), votes_int) ++ + def __ensure_not_cman(lib_env): + if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster: + 
raise LibraryError(reports.cman_unsupported_command()) +diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py +index 4129aeb..b49b9f6 100644 +--- a/pcs/lib/corosync/live.py ++++ b/pcs/lib/corosync/live.py +@@ -62,3 +62,18 @@ def get_quorum_status_text(runner): + reports.corosync_quorum_get_status_error(output) + ) + return output ++ ++def set_expected_votes(runner, votes): ++ """ ++ set expected votes in live cluster to specified value ++ """ ++ output, retval = runner.run([ ++ os.path.join(settings.corosync_binaries, "corosync-quorumtool"), ++ # format votes to handle the case where they are int ++ "-e", "{0}".format(votes) ++ ]) ++ if retval != 0: ++ raise LibraryError( ++ reports.corosync_quorum_set_expected_votes_error(output) ++ ) ++ return output +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index d8f88cd..9ececf9 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -565,6 +565,19 @@ def corosync_quorum_get_status_error(reason): + } + ) + ++def corosync_quorum_set_expected_votes_error(reason): ++ """ ++ unable to set expcted votes in a live cluster ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR, ++ "Unable to set expected votes: {reason}", ++ info={ ++ "reason": reason, ++ } ++ ) ++ + def corosync_config_reloaded(): + """ + corosync configuration has been reloaded +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 949d918..a436b4c 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -563,6 +563,9 @@ Add/Change quorum device options. Generic options and model options are all doc + + WARNING: If you want to change "host" option of qdevice model net, use "pcs quorum device remove" and "pcs quorum device add" commands to set up configuration properly unless old and new host is the same machine. + .TP ++expected\-votes <votes> ++Set expected votes in the live cluster to specified value. This only affects the live cluster, not changes any configuration files. 
++.TP + unblock [\fB\-\-force\fR] + Cancel waiting for all nodes when establishing quorum. Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless. This command should ONLY be used when nodes which the cluster is waiting for have been confirmed to be powered off and to have no access to shared resources. + +diff --git a/pcs/quorum.py b/pcs/quorum.py +index 27085ac..2d54ed7 100644 +--- a/pcs/quorum.py ++++ b/pcs/quorum.py +@@ -28,6 +28,8 @@ def quorum_cmd(lib, argv, modificators): + usage.quorum(argv) + elif sub_cmd == "config": + quorum_config_cmd(lib, argv_next, modificators) ++ elif sub_cmd == "expected-votes": ++ quorum_expected_votes_cmd(lib, argv_next, modificators) + elif sub_cmd == "status": + quorum_status_cmd(lib, argv_next, modificators) + elif sub_cmd == "device": +@@ -101,6 +103,11 @@ def quorum_config_to_str(config): + + return lines + ++def quorum_expected_votes_cmd(lib, argv, modificators): ++ if len(argv) != 1: ++ raise CmdLineInputError() ++ lib.quorum.set_expected_votes_live(argv[0]) ++ + def quorum_status_cmd(lib, argv, modificators): + if argv: + raise CmdLineInputError() +diff --git a/pcs/test/suite.py b/pcs/test/suite.py +index 85dd20c..5b29918 100755 +--- a/pcs/test/suite.py ++++ b/pcs/test/suite.py +@@ -74,7 +74,7 @@ def run_tests(tests, verbose=False, color=False): + verbosity=2 if verbose else 1, + resultclass=resultclass + ) +- testRunner.run(tests) ++ return testRunner.run(tests) + + put_package_to_path() + explicitly_enumerated_tests = [ +@@ -85,7 +85,7 @@ explicitly_enumerated_tests = [ + "--all-but", + ) + ] +-run_tests( ++test_result = run_tests( + discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv), + verbose="-v" in sys.argv, + color=( +@@ -99,6 +99,8 @@ run_tests( + ) + ), + ) ++if not test_result.wasSuccessful(): ++ sys.exit(1) + + # assume that we are in pcs root dir + # +diff --git 
a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py +index e824f37..c12ab66 100644 +--- a/pcs/test/test_lib_commands_quorum.py ++++ b/pcs/test/test_lib_commands_quorum.py +@@ -1750,3 +1750,46 @@ class UpdateDeviceTest(TestCase, CmanMixin): + "model: net\n bad_option: bad_value" + ) + ) ++ ++ ++@mock.patch("pcs.lib.commands.quorum.corosync_live.set_expected_votes") ++@mock.patch.object( ++ LibraryEnvironment, ++ "cmd_runner", ++ lambda self: "mock_runner" ++) ++class SetExpectedVotesLiveTest(TestCase, CmanMixin): ++ def setUp(self): ++ self.mock_logger = mock.MagicMock(logging.Logger) ++ self.mock_reporter = MockLibraryReportProcessor() ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True) ++ def test_disabled_on_cman(self, mock_set_votes): ++ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ self.assert_disabled_on_cman( ++ lambda: lib.set_expected_votes_live(lib_env, "5") ++ ) ++ mock_set_votes.assert_not_called() ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def test_success(self, mock_set_votes): ++ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ lib.set_expected_votes_live(lib_env, "5") ++ mock_set_votes.assert_called_once_with("mock_runner", 5) ++ ++ @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False) ++ def test_invalid_votes(self, mock_set_votes): ++ lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter) ++ assert_raise_library_error( ++ lambda: lib.set_expected_votes_live(lib_env, "-5"), ++ ( ++ severity.ERROR, ++ report_codes.INVALID_OPTION_VALUE, ++ { ++ "option_name": "expected votes", ++ "option_value": "-5", ++ "allowed_values": "positive integer", ++ } ++ ) ++ ) ++ mock_set_votes.assert_not_called() +diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py +index 96fe235..0fc5eb2 100644 +--- a/pcs/test/test_lib_corosync_live.py ++++ b/pcs/test/test_lib_corosync_live.py +@@ -141,3 +141,45 @@ 
class GetQuorumStatusTextTest(TestCase): + self.mock_runner.run.assert_called_once_with([ + self.quorum_tool, "-p" + ]) ++ ++ ++class SetExpectedVotesTest(TestCase): ++ def setUp(self): ++ self.mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ ++ def path(self, name): ++ return os.path.join(settings.corosync_binaries, name) ++ ++ def test_success(self): ++ cmd_retval = 0 ++ cmd_output = "cmd output" ++ mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ mock_runner.run.return_value = (cmd_output, cmd_retval) ++ ++ lib.set_expected_votes(mock_runner, 3) ++ ++ mock_runner.run.assert_called_once_with([ ++ self.path("corosync-quorumtool"), "-e", "3" ++ ]) ++ ++ def test_error(self): ++ cmd_retval = 1 ++ cmd_output = "cmd output" ++ mock_runner = mock.MagicMock(spec_set=CommandRunner) ++ mock_runner.run.return_value = (cmd_output, cmd_retval) ++ ++ assert_raise_library_error( ++ lambda: lib.set_expected_votes(mock_runner, 3), ++ ( ++ severity.ERROR, ++ report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR, ++ { ++ "reason": cmd_output, ++ } ++ ) ++ ) ++ ++ mock_runner.run.assert_called_once_with([ ++ self.path("corosync-quorumtool"), "-e", "3" ++ ]) ++ +diff --git a/pcs/usage.py b/pcs/usage.py +index 542f806..ee53a2f 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1352,6 +1352,10 @@ Commands: + to set up configuration properly unless old and new host is the same + machine. + ++ expected-votes <votes> ++ Set expected votes in the live cluster to specified value. This only ++ affects the live cluster, not changes any configuration files. ++ + unblock [--force] + Cancel waiting for all nodes when establishing quorum. 
Useful in + situations where you know the cluster is inquorate, but you are +-- +1.8.3.1 + diff --git a/SOURCES/bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch b/SOURCES/bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch new file mode 100644 index 0000000..0a4e1cf --- /dev/null +++ b/SOURCES/bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch @@ -0,0 +1,278 @@ +From 6805a235de50925ed7f30ac79b3d96be3f5d71df Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Mon, 25 Jul 2016 14:23:23 +0200 +Subject: [PATCH 1/2] remove dead code + +Function resource_master_remove could not ever be called because conditions +with xpath queries were never True. In the case when the resource_id was +an id of a master resource, it got changed to the id of the master's child +right at the beginning of resource_remove function. +--- + pcs/resource.py | 49 ++++++------------------------------------------- + 1 file changed, 6 insertions(+), 43 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index 9384a21..24128ba 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -1618,45 +1618,9 @@ def resource_master_create(dom, argv, update=False, master_id=None): + + return dom, master_element.getAttribute("id") + +-def resource_master_remove(argv): +- if len(argv) < 1: +- usage.resource() +- sys.exit(1) +- +- dom = utils.get_cib_dom() +- master_id = argv.pop(0) +- +- master_found = False +-# Check to see if there's a resource/group with the master_id if so, we remove the parent +- for rg in (dom.getElementsByTagName("primitive") + dom.getElementsByTagName("group")): +- if rg.getAttribute("id") == master_id and rg.parentNode.tagName == "master": +- master_id = rg.parentNode.getAttribute("id") +- +- resources_to_cleanup = [] +- for master in dom.getElementsByTagName("master"): +- if master.getAttribute("id") == master_id: +- childNodes = 
master.getElementsByTagName("primitive") +- for child in childNodes: +- resources_to_cleanup.append(child.getAttribute("id")) +- master_found = True +- break +- +- if not master_found: +- utils.err("Unable to find multi-state resource with id %s" % master_id) +- +- constraints_element = dom.getElementsByTagName("constraints") +- if len(constraints_element) > 0: +- constraints_element = constraints_element[0] +- for resource_id in resources_to_cleanup: +- remove_resource_references( +- dom, resource_id, constraints_element=constraints_element +- ) +- master.parentNode.removeChild(master) +- print("Removing Master - " + master_id) +- utils.replace_cib_configuration(dom) +- + def resource_remove(resource_id, output = True): + dom = utils.get_cib_dom() ++ # if resource is a clone or a master, work with its child instead + cloned_resource = utils.dom_get_clone_ms_resource(dom, resource_id) + if cloned_resource: + resource_id = cloned_resource.getAttribute("id") +@@ -1704,16 +1668,15 @@ def resource_remove(resource_id, output = True): + resource_remove(res.getAttribute("id")) + sys.exit(0) + ++ # now we know resource is not a group, a clone nor a master ++ # because of the conditions above ++ if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'): ++ utils.err("Resource '{0}' does not exist.".format(resource_id)) ++ + group_xpath = '//group/primitive[@id="'+resource_id+'"]/..' 
+ group = utils.get_cib_xpath(group_xpath) + num_resources_in_group = 0 + +- if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'): +- if utils.does_exist('//resources/master[@id="'+resource_id+'"]'): +- return resource_master_remove([resource_id]) +- +- utils.err("Resource '{0}' does not exist.".format(resource_id)) +- + if (group != ""): + num_resources_in_group = len(parseString(group).documentElement.getElementsByTagName("primitive")) + +-- +1.8.3.1 + + +From cd96c34c7ad1f4f767c0d14475b683a70c3b0862 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Mon, 25 Jul 2016 17:34:35 +0200 +Subject: [PATCH 2/2] when removing a remote node remove it from pacemaker's + caches as well + +--- + pcs/cluster.py | 6 +++++ + pcs/resource.py | 64 ++++++++++++++++++++++++++++---------------------- + pcs/test/test_utils.py | 9 +++++++ + pcs/utils.py | 8 +++++++ + 4 files changed, 59 insertions(+), 28 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 7a8615d..1c3b425 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -1922,6 +1922,12 @@ def cluster_remote_node(argv): + nvpair.parentNode.removeChild(nvpair) + dom = constraint.remove_constraints_containing_node(dom, hostname) + utils.replace_cib_configuration(dom) ++ if not utils.usefile: ++ output, retval = utils.run([ ++ "crm_node", "--force", "--remove", hostname ++ ]) ++ if retval != 0: ++ utils.err("unable to remove: {0}".fomat(output)) + else: + usage.cluster(["remote-node"]) + sys.exit(1) +diff --git a/pcs/resource.py b/pcs/resource.py +index 24128ba..a85f46f 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -929,31 +929,11 @@ def resource_update(res_id,args): + ia.setAttribute("value", val) + instance_attributes.appendChild(ia) + +- meta_attributes = resource.getElementsByTagName("meta_attributes") +- if len(meta_attributes) == 0: +- meta_attributes = dom.createElement("meta_attributes") +- meta_attributes.setAttribute("id", res_id + 
"-meta_attributes") +- resource.appendChild(meta_attributes) +- else: +- meta_attributes = meta_attributes[0] +- +- meta_attrs = utils.convert_args_to_tuples(meta_values) +- for (key,val) in meta_attrs: +- meta_found = False +- for ma in meta_attributes.getElementsByTagName("nvpair"): +- if ma.getAttribute("name") == key: +- meta_found = True +- if val == "": +- meta_attributes.removeChild(ma) +- else: +- ma.setAttribute("value", val) +- break +- if not meta_found: +- ma = dom.createElement("nvpair") +- ma.setAttribute("id", res_id + "-meta_attributes-" + key) +- ma.setAttribute("name", key) +- ma.setAttribute("value", val) +- meta_attributes.appendChild(ma) ++ remote_node_name = utils.dom_get_resource_remote_node_name(resource) ++ utils.dom_update_meta_attr( ++ resource, ++ utils.convert_args_to_tuples(meta_values) ++ ) + + operations = resource.getElementsByTagName("operations") + if len(operations) == 0: +@@ -1005,6 +985,17 @@ def resource_update(res_id,args): + + utils.replace_cib_configuration(dom) + ++ if ( ++ remote_node_name ++ and ++ remote_node_name != utils.dom_get_resource_remote_node_name(resource) ++ ): ++ # if the resource was a remote node and it is not anymore, (or its name ++ # changed) we need to tell pacemaker about it ++ output, retval = utils.run([ ++ "crm_node", "--force", "--remove", remote_node_name ++ ]) ++ + if "--wait" in utils.pcs_options: + args = ["crm_resource", "--wait"] + if wait_timeout: +@@ -1231,10 +1222,22 @@ def resource_meta(res_id, argv): + if "--wait" in utils.pcs_options: + wait_timeout = utils.validate_wait_get_timeout() + ++ remote_node_name = utils.dom_get_resource_remote_node_name(resource_el) + utils.dom_update_meta_attr(resource_el, utils.convert_args_to_tuples(argv)) + + utils.replace_cib_configuration(dom) + ++ if ( ++ remote_node_name ++ and ++ remote_node_name != utils.dom_get_resource_remote_node_name(resource_el) ++ ): ++ # if the resource was a remote node and it is not anymore, (or its name ++ # changed) we 
need to tell pacemaker about it ++ output, retval = utils.run([ ++ "crm_node", "--force", "--remove", remote_node_name ++ ]) ++ + if "--wait" in utils.pcs_options: + args = ["crm_resource", "--wait"] + if wait_timeout: +@@ -1714,11 +1717,12 @@ def resource_remove(resource_id, output = True): + ) + dom = utils.get_cib_dom() + resource_el = utils.dom_get_resource(dom, resource_id) ++ remote_node_name = None + if resource_el: +- remote_node = utils.dom_get_resource_remote_node_name(resource_el) +- if remote_node: ++ remote_node_name = utils.dom_get_resource_remote_node_name(resource_el) ++ if remote_node_name: + dom = constraint.remove_constraints_containing_node( +- dom, remote_node, output ++ dom, remote_node_name, output + ) + utils.replace_cib_configuration(dom) + dom = utils.get_cib_dom() +@@ -1784,6 +1788,10 @@ def resource_remove(resource_id, output = True): + if output == True: + utils.err("Unable to remove resource '%s' (do constraints exist?)" % (resource_id)) + return False ++ if remote_node_name and not utils.usefile: ++ output, retval = utils.run([ ++ "crm_node", "--force", "--remove", remote_node_name ++ ]) + return True + + def stonith_level_rm_device(cib_dom, stn_id): +diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py +index 819f8ee..192048e 100644 +--- a/pcs/test/test_utils.py ++++ b/pcs/test/test_utils.py +@@ -273,6 +273,9 @@ class UtilsTest(unittest.TestCase): + name="remote-node" value="guest2"/> + </instance_attributes> + </primitive> ++ <primitive id="dummy3" ++ class="ocf" provider="pacemaker" type="remote"> ++ </primitive> + </resources> + """).documentElement + resources = dom.getElementsByTagName("resources")[0] +@@ -296,6 +299,12 @@ class UtilsTest(unittest.TestCase): + utils.dom_get_resource(dom, "vm-guest1") + ) + ) ++ self.assertEqual( ++ "dummy3", ++ utils.dom_get_resource_remote_node_name( ++ utils.dom_get_resource(dom, "dummy3") ++ ) ++ ) + + def test_dom_get_meta_attr_value(self): + dom = self.get_cib_empty() +diff --git 
a/pcs/utils.py b/pcs/utils.py +index a7ed975..25274dc 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1252,6 +1252,14 @@ def validate_constraint_resource(dom, resource_id): + def dom_get_resource_remote_node_name(dom_resource): + if dom_resource.tagName != "primitive": + return None ++ if ( ++ dom_resource.getAttribute("class").lower() == "ocf" ++ and ++ dom_resource.getAttribute("provider").lower() == "pacemaker" ++ and ++ dom_resource.getAttribute("type").lower() == "remote" ++ ): ++ return dom_resource.getAttribute("id") + return dom_get_meta_attr_value(dom_resource, "remote-node") + + def dom_get_meta_attr_value(dom_resource, meta_name): +-- +1.8.3.1 + diff --git a/SOURCES/bz1346852-01-fix-bad-request-when-resource-removal-t.patch b/SOURCES/bz1346852-01-fix-bad-request-when-resource-removal-t.patch new file mode 100644 index 0000000..ae01e91 --- /dev/null +++ b/SOURCES/bz1346852-01-fix-bad-request-when-resource-removal-t.patch @@ -0,0 +1,315 @@ +From 4949b387cbec0b79976ca87fbde41e441c21c197 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Mon, 27 Jun 2016 11:49:43 +0200 +Subject: [PATCH] bz1346852-01-fix bad request when resource removal takes + longer than pcs expects + +--- + pcs/cluster.py | 19 +++++++++++-- + pcs/pcs.8 | 4 +-- + pcs/resource.py | 3 ++- + pcs/settings_default.py | 1 + + pcs/usage.py | 5 ++-- + pcsd/remote.rb | 71 ++++++++++++++++++++++++++++++++++++++++++------- + pcsd/views/main.erb | 13 ++++++--- + pcsd/views/nodes.erb | 14 +++++----- + 8 files changed, 102 insertions(+), 28 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 988ab75..9d4798c 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -1171,6 +1171,9 @@ def cluster_push(argv): + + filename = None + scope = None ++ timeout = None ++ if "--wait" in utils.pcs_options: ++ timeout = utils.validate_wait_get_timeout() + for arg in argv: + if "=" not in arg: + filename = arg +@@ -1206,8 +1209,20 @@ def cluster_push(argv): + output, retval = 
utils.run(command) + if retval != 0: + utils.err("unable to push cib\n" + output) +- else: +- print("CIB updated") ++ print("CIB updated") ++ if "--wait" not in utils.pcs_options: ++ return ++ cmd = ["crm_resource", "--wait"] ++ if timeout: ++ cmd.extend(["--timeout", timeout]) ++ output, retval = utils.run(cmd) ++ if retval != 0: ++ msg = [] ++ if retval == settings.pacemaker_wait_timeout_status: ++ msg.append("waiting timeout") ++ if output: ++ msg.append("\n" + output) ++ utils.err("\n".join(msg).strip()) + + def cluster_edit(argv): + if 'EDITOR' in os.environ: +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index a72a9bd..949d918 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -259,8 +259,8 @@ Sync corosync configuration to all nodes found from current corosync.conf file ( + cib [filename] [scope=<scope> | \fB\-\-config\fR] + Get the raw xml from the CIB (Cluster Information Base). If a filename is provided, we save the CIB to that file, otherwise the CIB is printed. Specify scope to get a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status. \fB\-\-config\fR is the same as scope=configuration. Do not specify a scope if you want to edit the saved CIB using pcs (pcs -f <command>). + .TP +-cib-push <filename> [scope=<scope> | \fB\-\-config\fR] +-Push the raw xml from <filename> to the CIB (Cluster Information Base). You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push. Specify scope to push a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults. \fB\-\-config\fR is the same as scope=configuration. Use of \fB\-\-config\fR is recommended. Do not specify a scope if you need to push the whole CIB or be warned in the case of outdated CIB. 
WARNING: the selected scope of the CIB will be overwritten by the current content of the specified file. ++cib-push <filename> [scope=<scope> | \fB\-\-config\fR] [\fB\-\-wait\fR[=<n>]] ++Push the raw xml from <filename> to the CIB (Cluster Information Base). You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push. Specify scope to push a specific section of the CIB. Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults. \fB\-\-config\fR is the same as scope=configuration. Use of \fB\-\-config\fR is recommended. Do not specify a scope if you need to push the whole CIB or be warned in the case of outdated CIB. If --wait is specified wait up to 'n' seconds for changes to be applied. WARNING: the selected scope of the CIB will be overwritten by the current content of the specified file. + .TP + cib\-upgrade + Upgrade the CIB to conform to the latest version of the document schema. 
+diff --git a/pcs/resource.py b/pcs/resource.py +index 284bdb2..9384a21 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -21,6 +21,8 @@ from pcs import ( + constraint, + settings, + ) ++from pcs.settings import pacemaker_wait_timeout_status as \ ++ PACEMAKER_WAIT_TIMEOUT_STATUS + import pcs.lib.cib.acl as lib_acl + import pcs.lib.pacemaker as lib_pacemaker + from pcs.lib.external import get_systemd_services +@@ -31,7 +33,6 @@ from pcs.lib.pacemaker_values import timeout_to_seconds + import pcs.lib.resource_agent as lib_ra + + +-PACEMAKER_WAIT_TIMEOUT_STATUS = 62 + RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-" + + def resource_cmd(argv): +diff --git a/pcs/settings_default.py b/pcs/settings_default.py +index 9d44918..15421fd 100644 +--- a/pcs/settings_default.py ++++ b/pcs/settings_default.py +@@ -40,3 +40,4 @@ ocf_resources = os.path.join(ocf_root, "resource.d/") + nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/" + sbd_watchdog_default = "/dev/watchdog" + sbd_config = "/etc/sysconfig/sbd" ++pacemaker_wait_timeout_status = 62 +diff --git a/pcs/usage.py b/pcs/usage.py +index 42e03e6..542f806 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -653,7 +653,7 @@ Commands: + scope=configuration. Do not specify a scope if you want to edit + the saved CIB using pcs (pcs -f <command>). + +- cib-push <filename> [scope=<scope> | --config] ++ cib-push <filename> [scope=<scope> | --config] [--wait[=<n>]] + Push the raw xml from <filename> to the CIB (Cluster Information Base). + You can obtain the CIB by running the 'pcs cluster cib' command, which + is recommended first step when you want to perform desired +@@ -663,7 +663,8 @@ Commands: + crm_config, rsc_defaults, op_defaults. --config is the same as + scope=configuration. Use of --config is recommended. Do not specify + a scope if you need to push the whole CIB or be warned in the case +- of outdated CIB. ++ of outdated CIB. 
If --wait is specified wait up to 'n' seconds for ++ changes to be applied. + WARNING: the selected scope of the CIB will be overwritten by the + current content of the specified file. + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 0b2dc61..b1e00fa 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -5,6 +5,7 @@ require 'set' + require 'timeout' + require 'rexml/document' + require 'base64' ++require 'tempfile' + + require 'pcs.rb' + require 'resource.rb' +@@ -1523,23 +1524,73 @@ def remove_resource(params, request, auth_user) + return 403, 'Permission denied' + end + force = params['force'] ++ user = PCSAuth.getSuperuserAuth() + no_error_if_not_exists = params.include?('no_error_if_not_exists') +- errors = "" +- params.each { |k,v| +- if k.index("resid-") == 0 +- resid = k.gsub('resid-', '') +- command = [PCS, 'resource', 'delete', resid] +- command << '--force' if force +- out, errout, retval = run_cmd(auth_user, *command) ++ resource_list = [] ++ errors = '' ++ resource_to_remove = [] ++ params.each { |param,_| ++ if param.start_with?('resid-') ++ resource_list << param.split('resid-', 2)[1] ++ end ++ } ++ tmp_file = nil ++ if force ++ resource_to_remove = resource_list ++ else ++ begin ++ tmp_file = Tempfile.new('temp_cib') ++ _, err, retval = run_cmd(user, PCS, 'cluster', 'cib', tmp_file.path) + if retval != 0 +- unless out.index(" does not exist.") != -1 and no_error_if_not_exists +- errors += errout.join(' ').strip + "\n" ++ return [400, 'Unable to stop resource(s).'] ++ end ++ cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable'] ++ resource_list.each { |resource| ++ _, err, retval = run_cmd(user, *cmd, resource) ++ if retval != 0 ++ unless ( ++ err.join('').index('unable to find a resource') != -1 and ++ no_error_if_not_exists ++ ) ++ errors += "Unable to stop resource '#{resource}': #{err.join('')}" ++ end ++ else ++ resource_to_remove << resource + end ++ } ++ _, _, retval = run_cmd( ++ user, PCS, 'cluster', 'cib-push', tmp_file.path, 
'--config', '--wait' ++ ) ++ if retval != 0 ++ return [400, 'Unable to stop resource(s).'] ++ end ++ errors.strip! ++ unless errors.empty? ++ $logger.info("Stopping resource(s) errors:\n#{errors}") ++ return [400, errors] ++ end ++ rescue IOError ++ return [400, 'Unable to stop resource(s).'] ++ ensure ++ if tmp_file ++ tmp_file.close! ++ end ++ end ++ end ++ resource_to_remove.each { |resource| ++ cmd = [PCS, 'resource', 'delete', resource] ++ if force ++ cmd << '--force' ++ end ++ out, err, retval = run_cmd(auth_user, *cmd) ++ if retval != 0 ++ unless out.index(' does not exist.') != -1 and no_error_if_not_exists ++ errors += err.join(' ').strip + "\n" + end + end + } + errors.strip! +- if errors == "" ++ if errors.empty? + return 200 + else + $logger.info("Remove resource errors:\n"+errors) +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index b14c327..5461515 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -298,14 +298,19 @@ + {{meta_attributes-table resource=resource}} + {{#if utilization_support}} + {{#if resource.is_primitive}} +- {{utilization-table entity=resource utilization=resource.utilization type="resource"}} ++ {{utilization-table ++ entity=resource ++ utilization=resource.utilization ++ type="resource" ++ table_id="resource_utilization_attributes" ++ }} + {{/if}} + {{/if}} + <br style="clear:left;"> + {{/unless}} + </div> + {{#if stonith}} +- <div style="clear:left; margin-top: 2em;" id="stonith_info_div"> ++ <div style="clear:left; margin-top: 2em;" id="stonith_agent_form"> + {{fence-form + resource=resource + agent=resource.resource_agent +@@ -314,7 +319,7 @@ + </div> + {{else}} + {{#if resource.is_primitive}} +- <div style="clear:left; margin-top: 2em;" id="resource_info_div"> ++ <div style="clear:left; margin-top: 2em;" id="resource_agent_form"> + {{resource-form + resource=resource + agent=resource.resource_agent +@@ -725,7 +730,7 @@ Use the 'Add' button to submit the form."> + <tr> + <td + {{action toggleBody}} +- 
id="utilization_attributes" ++ {{bind-attr id=table_id}} + class="datatable_header hover-pointer" + > + {{#if show_content}} +diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb +index 478e0f6..8fccd25 100644 +--- a/pcsd/views/nodes.erb ++++ b/pcsd/views/nodes.erb +@@ -247,9 +247,8 @@ + </tr> + </table> + <table style="clear:left;float:left;margin-top:25px;"> +- <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> ++ <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites" id="node_attributes"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> + <tr><td> +- <div id="node_attributes"> + <table class="datatable"> + <tr><th>Attribute</th><th>Value</th><th>Remove</th></tr> + {{#each attr in Pcs.nodesController.cur_node_attr}} +@@ -268,14 +267,12 @@ + <td><button type="button" onclick="add_node_attr('#new_node_attr_col');" name="add">Add</button></td> + </tr> + </table> +- </div> + </td> + </tr> + </table> + <table style="clear:left;float:left;margin-top:25px;"> +- <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> ++ <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if 
Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites" id="fence_levels"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr> + <tr><td> +- <div id="fencelevels"> + <table class="datatable"> + <tr><th>Level</th><th>Fence Devices</th><th>Remove</th></tr> + {{#each Pcs.nodesController.cur_node_fence_levels}} +@@ -301,13 +298,16 @@ + <td><button type="button" onclick="add_remove_fence_level($(this).parent());" name="add">Add</button></td> + </tr> + </table> +- </div> + </td> + </tr> + </table> + {{#if Pcs.nodesController.utilization_support}} + <table style="clear:left; float:left; margin-top: 25px;"><tr><td> +- {{utilization-table entity=Pcs.nodesController.cur_node utilization=Pcs.nodesController.cur_node.utilization}} ++ {{utilization-table ++ entity=Pcs.nodesController.cur_node ++ utilization=Pcs.nodesController.cur_node.utilization ++ table_id="node_utilization_attributes" ++ }} + </td></tr></table> + {{/if}} + </div> +-- +1.8.3.1 + diff --git a/SOURCES/bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch b/SOURCES/bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch new file mode 100644 index 0000000..4fbbc44 --- /dev/null +++ b/SOURCES/bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch @@ -0,0 +1,54 @@ +From 06cef95211b84150fece67970426267849e74a36 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Fri, 22 Jul 2016 08:34:21 +0200 +Subject: [PATCH] web UI: fix error when removing resources takes long + +--- + pcsd/public/js/pcsd.js | 24 +++++++++++++++++------- + 1 file changed, 17 insertions(+), 7 deletions(-) + +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 6c88888..e763482 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1262,7 +1262,9 @@ function remove_nodes(ids, force) { + } + + function 
remove_resource(ids, force) { +- var data = {}; ++ var data = { ++ no_error_if_not_exists: true ++ }; + if (force) { + data["force"] = force; + } +@@ -1287,12 +1289,20 @@ function remove_resource(ids, force) { + Pcs.update(); + }, + error: function (xhr, status, error) { +- error = $.trim(error) +- var message = "Unable to remove resources (" + error + ")"; +- if ( +- (xhr.responseText.substring(0,6) == "Error:") || ("Forbidden" == error) +- ) { +- message += "\n\n" + xhr.responseText.replace("--force", "'Enforce removal'"); ++ error = $.trim(error); ++ var message = ""; ++ if (status == "timeout" || error == "timeout") { ++ message = "Operation takes longer to complete than expected."; ++ } else { ++ message = "Unable to remove resources (" + error + ")"; ++ if ( ++ (xhr.responseText.substring(0, 6) == "Error:") || ++ ("Forbidden" == error) ++ ) { ++ message += "\n\n" + xhr.responseText.replace( ++ "--force", "'Enforce removal'" ++ ); ++ } + } + alert(message); + $("#dialog_verify_remove_resources.ui-dialog-content").each( +-- +1.8.3.1 + diff --git a/SOURCES/bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch b/SOURCES/bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch new file mode 100644 index 0000000..2067df2 --- /dev/null +++ b/SOURCES/bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch @@ -0,0 +1,34 @@ +From 900da783cefaa9f8d81ac72bf90b532638ac297b Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Mon, 25 Jul 2016 15:30:49 +0200 +Subject: [PATCH] web UI: correct handling of timeout when removing multiple + resources + +If there are only 2 nodes in cluster (timeout for request is 30 seconds per node) +and removing resources takes longer then 1 minute, we have no other nodes to try +remove resources so it will be returned to javascript as there was no response +from cluster. Now we handle this stituation properly. 
+--- + pcsd/public/js/pcsd.js | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index e763482..45da010 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1291,7 +1291,11 @@ function remove_resource(ids, force) { + error: function (xhr, status, error) { + error = $.trim(error); + var message = ""; +- if (status == "timeout" || error == "timeout") { ++ if ( ++ status == "timeout" || ++ error == "timeout" || ++ xhr.responseText == '{"noresponse":true}' ++ ) { + message = "Operation takes longer to complete than expected."; + } else { + message = "Unable to remove resources (" + error + ")"; +-- +1.8.3.1 + diff --git a/SOURCES/bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch b/SOURCES/bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch new file mode 100644 index 0000000..41eb6fc --- /dev/null +++ b/SOURCES/bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch @@ -0,0 +1,25 @@ +From 2b843e7582ef3160a16094526727101180649448 Mon Sep 17 00:00:00 2001 +From: Radek Steiger <rsteiger@redhat.com> +Date: Wed, 10 Aug 2016 09:56:28 +0200 +Subject: [PATCH] fix detecting nonexisting resources in pcsd + +--- + pcsd/remote.rb | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 134ac5d..e467d0a 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1584,7 +1584,7 @@ def remove_resource(params, request, auth_user) + out, err, retval = run_cmd(user, *(cmd + [resource])) + if retval != 0 + unless ( +- (out + err).join('').include?(' does not exist.') and ++ (out + err).join('').include?('unable to find a resource') and + no_error_if_not_exists + ) + errors += "Unable to stop resource '#{resource}': #{err.join('')}" +-- +1.8.3.1 + diff --git a/SOURCES/bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch b/SOURCES/bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch new file mode 100644 index 
0000000..52d34b0 --- /dev/null +++ b/SOURCES/bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch @@ -0,0 +1,62 @@ +From 0f305d7d54b40fe13b1ef2134701b5169fe79d65 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Thu, 30 Jun 2016 17:23:19 +0200 +Subject: [PATCH] add a wrapper for holding SELinux context when pcsd is + started by systemd + +--- + Makefile | 3 +++ + pcsd/pcsd.service | 2 +- + pcsd/pcsd.service-runner | 13 +++++++++++++ + 3 files changed, 17 insertions(+), 1 deletion(-) + create mode 100644 pcsd/pcsd.service-runner + +diff --git a/Makefile b/Makefile +index de216ce..f0a5d03 100644 +--- a/Makefile ++++ b/Makefile +@@ -126,6 +126,9 @@ else + ifeq ($(IS_SYSTEMCTL),true) + install -d ${DESTDIR}/${systemddir}/system/ + install -m 644 pcsd/pcsd.service ${DESTDIR}/${systemddir}/system/ ++# ${DESTDIR}${PREFIX}/lib/pcsd/pcsd holds the selinux context ++ install -m 755 pcsd/pcsd.service-runner ${DESTDIR}${PREFIX}/lib/pcsd/pcsd ++ rm ${DESTDIR}${PREFIX}/lib/pcsd/pcsd.service-runner + else + install -m 755 -D pcsd/pcsd ${DESTDIR}/${initdir}/pcsd + endif +diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service +index 075a3a6..e506f1b 100644 +--- a/pcsd/pcsd.service ++++ b/pcsd/pcsd.service +@@ -4,7 +4,7 @@ Description=PCS GUI and remote configuration interface + [Service] + EnvironmentFile=/etc/sysconfig/pcsd + Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby +-ExecStart=/usr/bin/ruby -C/var/lib/pcsd -I/usr/lib/pcsd -- /usr/lib/pcsd/ssl.rb > /dev/null & ++ExecStart=/usr/lib/pcsd/pcsd > /dev/null & + + [Install] + WantedBy=multi-user.target +diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner +new file mode 100644 +index 0000000..1949a68 +--- /dev/null ++++ b/pcsd/pcsd.service-runner +@@ -0,0 +1,13 @@ ++#!/usr/bin/ruby ++# this file is a pcsd runner callable from a systemd unit ++# it also serves as a holder of a selinux context ++ ++# add pcsd to the load path (ruby -I) ++libdir = File.dirname(__FILE__) 
++$LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir) ++ ++# change current directory (ruby -C) ++Dir.chdir('/var/lib/pcsd') ++ ++# import and run pcsd ++require 'ssl' +-- +1.8.3.1 + diff --git a/SOURCES/bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch b/SOURCES/bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch new file mode 100644 index 0000000..3700d4d --- /dev/null +++ b/SOURCES/bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch @@ -0,0 +1,104 @@ +From 760028cca19c07dd56162453a4eb3d3b0de7f3af Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Tue, 19 Jul 2016 13:11:04 +0200 +Subject: [PATCH] fix traceback when stopping pcsd shortly after start + +- properly notify systemd that pcsd finished starting up +- gracefully exit on SIGINT and SIGTERM +--- + pcsd/pcsd.service | 1 + + pcsd/pcsd.service-runner | 25 ++++++++++++++++++------- + pcsd/ssl.rb | 18 ++++++++++++++++++ + 3 files changed, 37 insertions(+), 7 deletions(-) + +diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service +index e506f1b..20bc9ab 100644 +--- a/pcsd/pcsd.service ++++ b/pcsd/pcsd.service +@@ -5,6 +5,7 @@ Description=PCS GUI and remote configuration interface + EnvironmentFile=/etc/sysconfig/pcsd + Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby + ExecStart=/usr/lib/pcsd/pcsd > /dev/null & ++Type=notify + + [Install] + WantedBy=multi-user.target +diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner +index 1949a68..883d290 100644 +--- a/pcsd/pcsd.service-runner ++++ b/pcsd/pcsd.service-runner +@@ -2,12 +2,23 @@ + # this file is a pcsd runner callable from a systemd unit + # it also serves as a holder of a selinux context + +-# add pcsd to the load path (ruby -I) +-libdir = File.dirname(__FILE__) +-$LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir) ++begin ++ # add pcsd to the load path (ruby -I) ++ libdir = File.dirname(__FILE__) ++ $LOAD_PATH.unshift(libdir) unless 
$LOAD_PATH.include?(libdir) + +-# change current directory (ruby -C) +-Dir.chdir('/var/lib/pcsd') ++ # change current directory (ruby -C) ++ Dir.chdir('/var/lib/pcsd') + +-# import and run pcsd +-require 'ssl' ++ # import and run pcsd ++ require 'ssl' ++rescue SignalException => e ++ if [Signal.list['INT'], Signal.list['TERM']].include?(e.signo) ++ # gracefully exit on SIGINT and SIGTERM ++ # pcsd sets up signal handlers later, this catches exceptions which occur ++ # by recieving signals before the handlers have been set up. ++ exit ++ else ++ raise ++ end ++end +diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb +index f56c947..c00d8b3 100644 +--- a/pcsd/ssl.rb ++++ b/pcsd/ssl.rb +@@ -3,6 +3,7 @@ require 'webrick' + require 'webrick/https' + require 'openssl' + require 'rack' ++require 'socket' + + require 'bootstrap.rb' + require 'pcs.rb' +@@ -66,11 +67,28 @@ def run_server(server, webrick_options, secondary_addrs) + + $logger.info("Listening on #{primary_addr} port #{port}") + server.run(Sinatra::Application, webrick_options) { |server_instance| ++ # configure ssl options + server_instance.ssl_context.ciphers = ciphers ++ # set listening addresses + secondary_addrs.each { |addr| + $logger.info("Adding listener on #{addr} port #{port}") + server_instance.listen(addr, port) + } ++ # notify systemd we are running ++ if ISSYSTEMCTL ++ socket_name = ENV['NOTIFY_SOCKET'] ++ if socket_name ++ if socket_name.start_with?('@') ++ # abstract namespace socket ++ socket_name[0] = "\0" ++ end ++ $logger.info("Notifying systemd we are running (socket #{socket_name})") ++ sd_socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM) ++ sd_socket.connect(Socket.pack_sockaddr_un(socket_name)) ++ sd_socket.send('READY=1', 0) ++ sd_socket.close() ++ end ++ end + } + end + +-- +1.8.3.1 + diff --git a/SOURCES/bz1349465-01-allow-to-specify-bash-completion-install-dir.patch b/SOURCES/bz1349465-01-allow-to-specify-bash-completion-install-dir.patch new file mode 100644 index 0000000..acc3601 --- 
/dev/null +++ b/SOURCES/bz1349465-01-allow-to-specify-bash-completion-install-dir.patch @@ -0,0 +1,34 @@ +From 160b8c4657356725befb02212c148c58da3ce7aa Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Fri, 1 Jul 2016 07:23:37 +0200 +Subject: [PATCH] allow to specify bash completion install dir + +--- + Makefile | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index 5c3e9d6..8e845d6 100644 +--- a/Makefile ++++ b/Makefile +@@ -76,11 +76,16 @@ ifndef install_settings + endif + endif + ++ ++ifndef BASH_COMPLETION_DIR ++ BASH_COMPLETION_DIR=${DESTDIR}/etc/bash_completion.d ++endif ++ + install: + $(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS} + mkdir -p ${DESTDIR}${PREFIX}/sbin/ + mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs +- install -D pcs/bash_completion.sh ${DESTDIR}/etc/bash_completion.d/pcs ++ install -D pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs + install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8 + ifeq ($(IS_DEBIAN),true) + ifeq ($(install_settings),true) +-- +1.8.3.1 + diff --git a/SOURCES/bz1349465-02-install-bash-completion-with-standard-permissions.patch b/SOURCES/bz1349465-02-install-bash-completion-with-standard-permissions.patch new file mode 100644 index 0000000..ebf7c05 --- /dev/null +++ b/SOURCES/bz1349465-02-install-bash-completion-with-standard-permissions.patch @@ -0,0 +1,25 @@ +From 01e0ceba838c558c08b11c51646af6e8d26a699b Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Wed, 27 Jul 2016 16:09:01 +0200 +Subject: [PATCH] install bash completion with standard permissions + +--- + Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Makefile b/Makefile +index cbbeb85..25fb87d 100644 +--- a/Makefile ++++ b/Makefile +@@ -85,7 +85,7 @@ install: + $(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS} + mkdir -p ${DESTDIR}${PREFIX}/sbin/ + mv 
${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs +- install -D pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs ++ install -D -m644 pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs + install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8 + ifeq ($(IS_DEBIAN),true) + ifeq ($(install_settings),true) +-- +1.8.3.1 + diff --git a/SOURCES/bz1353607-01-tests-use-safe-node-names.patch b/SOURCES/bz1353607-01-tests-use-safe-node-names.patch new file mode 100644 index 0000000..0b0238f --- /dev/null +++ b/SOURCES/bz1353607-01-tests-use-safe-node-names.patch @@ -0,0 +1,1623 @@ +From 850473a59993e1a75c248a9b3a83284f568a4bf2 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Fri, 8 Jul 2016 15:14:05 +0200 +Subject: [PATCH] tests: use safe node names + +--- + pcs/test/test_cluster.py | 466 +++++++++++++++++++++++------------------------ + 1 file changed, 233 insertions(+), 233 deletions(-) + +diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py +index 2c3e71b..8a245a2 100644 +--- a/pcs/test/test_cluster.py ++++ b/pcs/test/test_cluster.py +@@ -106,7 +106,7 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin): + self.assertTrue(output.startswith("\nUsage: pcs cluster setup...")) + self.assertEqual(1, returnVal) + +- output, returnVal = pcs(temp_cib, "cluster setup cname rh7-1 rh7-2") ++ output, returnVal = pcs(temp_cib, "cluster setup cname rh7-1.localhost rh7-2.localhost") + self.assertEqual( + "Error: A cluster name (--name <name>) is required to setup a cluster\n", + output +@@ -116,22 +116,22 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin): + def test_cluster_setup_hostnames_resolving(self): + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address" ++ "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address.invalid" + .format(corosync_conf_tmp, cluster_conf_tmp) + ) + ac(output, """\ + Error: Unable to 
resolve all hostnames, use --force to override +-Warning: Unable to resolve hostname: nonexistant-address ++Warning: Unable to resolve hostname: nonexistant-address.invalid + """) + self.assertEqual(1, returnVal) + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address --force" ++ "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address.invalid --force" + .format(corosync_conf_tmp, cluster_conf_tmp) + ) + ac(output, """\ +-Warning: Unable to resolve hostname: nonexistant-address ++Warning: Unable to resolve hostname: nonexistant-address.invalid + """) + self.assertEqual(0, returnVal) + +@@ -141,7 +141,7 @@ Warning: Unable to resolve hostname: nonexistant-address + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost" + .format(corosync_conf_tmp) + ) + self.assertEqual("", output) +@@ -156,12 +156,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -183,7 +183,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --name cname rh7-2 rh7-3" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-2.localhost rh7-3.localhost" + .format(corosync_conf_tmp) + ) + self.assertEqual("""\ +@@ -198,7 +198,7 @@ Error: {0} already exists, use --force to overwrite + + output, returnVal = pcs( + temp_cib, +- "cluster setup --force --local --corosync_conf={0} --name cname rh7-2 rh7-3" ++ "cluster setup --force --local --corosync_conf={0} --name cname rh7-2.localhost rh7-3.localhost" + .format(corosync_conf_tmp) + ) + self.assertEqual("", output) +@@ -215,12 +215,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-2 ++ 
ring0_addr: rh7-2.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-3 ++ ring0_addr: rh7-3.localhost + nodeid: 2 + } + } +@@ -243,7 +243,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost" + .format(cluster_conf_tmp) + ) + self.assertEqual("", output) +@@ -252,17 +252,17 @@ logging { + <cluster config_version="9" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -283,7 +283,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-2 rh7-3" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-2.localhost rh7-3.localhost" + .format(cluster_conf_tmp) + ) + self.assertEqual("""\ +@@ -298,7 +298,7 @@ Error: {0} already exists, use --force to overwrite + + output, returnVal = pcs( + temp_cib, +- "cluster setup --force --local --cluster_conf={0} --name cname rh7-2 rh7-3" ++ "cluster setup --force --local --cluster_conf={0} --name cname rh7-2.localhost rh7-3.localhost" + .format(cluster_conf_tmp) + ) + self.assertEqual("", output) +@@ -309,17 +309,17 @@ Error: {0} already exists, use --force to overwrite + <cluster config_version="9" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-2" nodeid="1"> ++ <clusternode name="rh7-2.localhost" nodeid="1"> + <fence> + <method 
name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-3" nodeid="2"> ++ <clusternode name="rh7-3.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-3"/> ++ <device name="pcmk-redirect" port="rh7-3.localhost"/> + </method> + </fence> + </clusternode> +@@ -344,7 +344,7 @@ Error: {0} already exists, use --force to overwrite + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost" + .format(corosync_conf_tmp) + ) + self.assertEqual("", output) +@@ -361,12 +361,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -385,10 +385,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode add --corosync_conf={0} rh7-3" ++ "cluster localnode add --corosync_conf={0} rh7-3.localhost" + .format(corosync_conf_tmp) + ) +- self.assertEqual("rh7-3: successfully added!\n", output) ++ self.assertEqual("rh7-3.localhost: successfully added!\n", output) + self.assertEqual(0, returnVal) + with open(corosync_conf_tmp) as f: + data = f.read() +@@ -402,17 +402,17 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + + node { +- ring0_addr: rh7-3 ++ ring0_addr: rh7-3.localhost + nodeid: 3 + } + } +@@ -430,11 +430,11 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode remove --corosync_conf={0} rh7-3" ++ "cluster localnode remove --corosync_conf={0} rh7-3.localhost" + .format(corosync_conf_tmp) + ) + self.assertEqual(0, returnVal) +- 
self.assertEqual("rh7-3: successfully removed!\n", output) ++ self.assertEqual("rh7-3.localhost: successfully removed!\n", output) + with open(corosync_conf_tmp) as f: + data = f.read() + ac(data, """\ +@@ -447,12 +447,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -471,10 +471,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode add --corosync_conf={0} rh7-3,192.168.1.3" ++ "cluster localnode add --corosync_conf={0} rh7-3.localhost,192.168.1.3" + .format(corosync_conf_tmp) + ) +- self.assertEqual("rh7-3,192.168.1.3: successfully added!\n", output) ++ self.assertEqual("rh7-3.localhost,192.168.1.3: successfully added!\n", output) + self.assertEqual(0, returnVal) + with open(corosync_conf_tmp) as f: + data = f.read() +@@ -488,17 +488,17 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + + node { +- ring0_addr: rh7-3 ++ ring0_addr: rh7-3.localhost + ring1_addr: 192.168.1.3 + nodeid: 3 + } +@@ -517,11 +517,11 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode remove --corosync_conf={0} rh7-2" ++ "cluster localnode remove --corosync_conf={0} rh7-2.localhost" + .format(corosync_conf_tmp) + ) + self.assertEqual(0, returnVal) +- self.assertEqual("rh7-2: successfully removed!\n", output) ++ self.assertEqual("rh7-2.localhost: successfully removed!\n", output) + with open(corosync_conf_tmp) as f: + data = f.read() + ac(data, """\ +@@ -534,12 +534,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-3 ++ ring0_addr: rh7-3.localhost + ring1_addr: 192.168.1.3 + nodeid: 3 + } +@@ -559,11 +559,11 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode 
remove --corosync_conf={0} rh7-3,192.168.1.3" ++ "cluster localnode remove --corosync_conf={0} rh7-3.localhost,192.168.1.3" + .format(corosync_conf_tmp) + ) + self.assertEqual(0, returnVal) +- self.assertEqual("rh7-3,192.168.1.3: successfully removed!\n", output) ++ self.assertEqual("rh7-3.localhost,192.168.1.3: successfully removed!\n", output) + with open(corosync_conf_tmp) as f: + data = f.read() + ac(data, """\ +@@ -576,7 +576,7 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + } +@@ -601,7 +601,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --auto_tie_breaker=1" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --auto_tie_breaker=1" + .format(corosync_conf_tmp) + ) + self.assertEqual("", output) +@@ -618,12 +618,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -642,10 +642,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode add --corosync_conf={0} rh7-3" ++ "cluster localnode add --corosync_conf={0} rh7-3.localhost" + .format(corosync_conf_tmp) + ) +- self.assertEqual(output, "rh7-3: successfully added!\n") ++ self.assertEqual(output, "rh7-3.localhost: successfully added!\n") + self.assertEqual(0, returnVal) + with open(corosync_conf_tmp) as f: + data = f.read() +@@ -659,17 +659,17 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + + node { +- ring0_addr: rh7-3 ++ ring0_addr: rh7-3.localhost + nodeid: 3 + } + } +@@ -688,10 +688,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode remove --corosync_conf={0} rh7-3" ++ "cluster localnode remove --corosync_conf={0} 
rh7-3.localhost" + .format(corosync_conf_tmp) + ) +- self.assertEqual("rh7-3: successfully removed!\n", output) ++ self.assertEqual("rh7-3.localhost: successfully removed!\n", output) + self.assertEqual(0, returnVal) + with open(corosync_conf_tmp) as f: + data = f.read() +@@ -705,12 +705,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -734,7 +734,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 rh7-3" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost rh7-3.localhost" + .format(corosync_conf_tmp) + ) + self.assertEqual("", output) +@@ -751,17 +751,17 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + + node { +- ring0_addr: rh7-3 ++ ring0_addr: rh7-3.localhost + nodeid: 3 + } + } +@@ -784,7 +784,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --transport udp" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --transport udp" + .format(corosync_conf_tmp) + ) + self.assertEqual("", output) +@@ -801,12 +801,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -834,7 +834,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost" + .format(cluster_conf_tmp) + ) + ac(output, "") +@@ -845,17 +845,17 @@ logging { + <cluster config_version="9" name="cname"> + 
<fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -873,10 +873,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode add --cluster_conf={0} rh7-3" ++ "cluster localnode add --cluster_conf={0} rh7-3.localhost" + .format(cluster_conf_tmp) + ) +- ac(output, "rh7-3: successfully added!\n") ++ ac(output, "rh7-3.localhost: successfully added!\n") + self.assertEqual(returnVal, 0) + with open(cluster_conf_tmp) as f: + data = f.read() +@@ -884,24 +884,24 @@ logging { + <cluster config_version="13" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-3" nodeid="3"> ++ <clusternode name="rh7-3.localhost" nodeid="3"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-3"/> ++ <device name="pcmk-redirect" port="rh7-3.localhost"/> + </method> + </fence> + </clusternode> +@@ -919,10 +919,10 @@ logging { + + output, returnVal = pcs( + 
temp_cib, +- "cluster localnode remove --cluster_conf={0} rh7-3" ++ "cluster localnode remove --cluster_conf={0} rh7-3.localhost" + .format(cluster_conf_tmp) + ) +- ac(output, "rh7-3: successfully removed!\n") ++ ac(output, "rh7-3.localhost: successfully removed!\n") + self.assertEqual(returnVal, 0) + + with open(cluster_conf_tmp) as f: +@@ -931,17 +931,17 @@ logging { + <cluster config_version="15" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -959,10 +959,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode add --cluster_conf={0} rh7-3,192.168.1.3" ++ "cluster localnode add --cluster_conf={0} rh7-3.localhost,192.168.1.3" + .format(cluster_conf_tmp) + ) +- ac(output, "rh7-3,192.168.1.3: successfully added!\n") ++ ac(output, "rh7-3.localhost,192.168.1.3: successfully added!\n") + self.assertEqual(returnVal, 0) + + with open(cluster_conf_tmp) as f: +@@ -971,25 +971,25 @@ logging { + <cluster config_version="20" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" 
port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-3" nodeid="3"> ++ <clusternode name="rh7-3.localhost" nodeid="3"> + <altname name="192.168.1.3"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-3"/> ++ <device name="pcmk-redirect" port="rh7-3.localhost"/> + </method> + </fence> + </clusternode> +@@ -1007,10 +1007,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode remove --cluster_conf={0} rh7-2" ++ "cluster localnode remove --cluster_conf={0} rh7-2.localhost" + .format(cluster_conf_tmp) + ) +- ac(output, "rh7-2: successfully removed!\n") ++ ac(output, "rh7-2.localhost: successfully removed!\n") + self.assertEqual(returnVal, 0) + + with open(cluster_conf_tmp) as f: +@@ -1019,18 +1019,18 @@ logging { + <cluster config_version="22" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-3" nodeid="3"> ++ <clusternode name="rh7-3.localhost" nodeid="3"> + <altname name="192.168.1.3"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-3"/> ++ <device name="pcmk-redirect" port="rh7-3.localhost"/> + </method> + </fence> + </clusternode> +@@ -1048,10 +1048,10 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster localnode remove --cluster_conf={0} rh7-3,192.168.1.3" ++ "cluster localnode remove --cluster_conf={0} rh7-3.localhost,192.168.1.3" + .format(cluster_conf_tmp) + ) +- ac(output, "rh7-3,192.168.1.3: successfully removed!\n") ++ ac(output, "rh7-3.localhost,192.168.1.3: successfully removed!\n") + self.assertEqual(returnVal, 0) + + with open(cluster_conf_tmp) as f: +@@ -1060,10 +1060,10 
@@ logging { + <cluster config_version="23" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +@@ -1086,7 +1086,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 rh7-3" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost rh7-3.localhost" + .format(cluster_conf_tmp) + ) + ac(output, "") +@@ -1097,24 +1097,24 @@ logging { + <cluster config_version="12" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-3" nodeid="3"> ++ <clusternode name="rh7-3.localhost" nodeid="3"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-3"/> ++ <device name="pcmk-redirect" port="rh7-3.localhost"/> + </method> + </fence> + </clusternode> +@@ -1137,7 +1137,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --transport udpu" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --transport udpu" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -1150,17 +1150,17 @@ Warning: Using udpu transport on a 
CMAN cluster, cluster restart is required aft + <cluster config_version="9" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -1182,7 +1182,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --ipv6" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --ipv6" + .format(corosync_conf_tmp) + ) + self.assertEqual("", output) +@@ -1200,12 +1200,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -1228,7 +1228,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --ipv6" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --ipv6" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -1241,17 +1241,17 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters + <cluster config_version="9" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> 
+ </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -1272,14 +1272,14 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters + return + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr0 1.1.2.0" + .format(corosync_conf_tmp) + ) + assert r == 1 + ac(o, "Error: --addr0 can only be used once\n") + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp" + .format(corosync_conf_tmp) + ) + assert r == 1 +@@ -1289,7 +1289,7 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters + ) + + o,r = pcs( +- "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0" ++ "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0" + .format(corosync_conf_tmp) + ) + ac(o,"") +@@ -1321,12 +1321,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -1348,7 +1348,7 @@ logging { + return + + o,r = pcs( +- "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9" ++ "cluster setup --transport udp --local 
--corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9" + .format(corosync_conf_tmp) + ) + ac(o,"") +@@ -1380,12 +1380,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -1407,7 +1407,7 @@ logging { + return + + o,r = pcs( +- "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0" ++ "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0" + .format(corosync_conf_tmp) + ) + ac(o,"") +@@ -1439,12 +1439,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -1466,7 +1466,7 @@ logging { + return + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp" + .format(corosync_conf_tmp) + ) + ac(o,"") +@@ -1500,12 +1500,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -1527,14 +1527,14 @@ logging { + return + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" + 
.format(corosync_conf_tmp) + ) + ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" ++ "cluster setup --force --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" + .format(corosync_conf_tmp) + ) + ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n") +@@ -1566,12 +1566,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -1593,14 +1593,14 @@ logging { + return + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" + .format(corosync_conf_tmp) + ) + ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" ++ "cluster setup --force --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" + .format(corosync_conf_tmp) + ) + ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n") +@@ -1631,12 +1631,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ 
-1658,25 +1658,25 @@ logging { + return + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2,192.168.99.3" + .format(corosync_conf_tmp) + ) +- ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3\n") ++ ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2.localhost,192.168.99.2,192.168.99.3\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost" + .format(corosync_conf_tmp) + ) + ac(o,"Error: if one node is configured for RRP, all nodes must be configured for RRP\n") + assert r == 1 + +- o,r = pcs("cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1") ++ o,r = pcs("cluster setup --force --local --name test99 rh7-1.localhost rh7-2.localhost --addr0 1.1.1.1") + ac(o,"Error: --addr0 and --addr1 can only be used with --transport=udp\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2" ++ "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2" + .format(corosync_conf_tmp) + ) + ac(o,"") +@@ -1694,13 +1694,13 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + ring1_addr: 192.168.99.1 + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + ring1_addr: 192.168.99.2 + nodeid: 2 + } +@@ -1723,49 +1723,49 @@ logging { + return + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2" ++ "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2" + 
.format(corosync_conf_tmp) + ) + ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2" ++ "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2" + .format(corosync_conf_tmp) + ) + ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2" ++ "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --auto_tie_breaker=2" + .format(corosync_conf_tmp) + ) + ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2" ++ "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --auto_tie_breaker=2" + .format(corosync_conf_tmp) + ) + ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2" ++ "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --last_man_standing=2" + .format(corosync_conf_tmp) + ) + ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2" ++ "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --last_man_standing=2" + .format(corosync_conf_tmp) + ) + ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n") + assert r == 1 + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=1 
--auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000" ++ "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000" + .format(corosync_conf_tmp) + ) + ac(o,"") +@@ -1782,12 +1782,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -1813,14 +1813,14 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0" ++ "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr0 1.1.2.0" + ) + ac(output, "Error: --addr0 can only be used once\n") + self.assertEqual(returnVal, 1) + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -1831,7 +1831,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0" ++ "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0" + .format(cluster_conf_tmp) + ) + ac(output, "") +@@ -1843,19 +1843,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + <cluster config_version="14" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" 
nodeid="1"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -1881,7 +1881,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9" ++ "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9" + .format(cluster_conf_tmp) + ) + ac(output, "") +@@ -1893,19 +1893,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + <cluster config_version="14" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -1931,7 +1931,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster 
setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0" ++ "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0" + .format(cluster_conf_tmp) + ) + ac(output, "") +@@ -1943,19 +1943,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + <cluster config_version="14" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -1981,7 +1981,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp" + .format(cluster_conf_tmp) + ) + ac(output, "") +@@ -1993,19 +1993,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + <cluster config_version="14" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- 
<device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -2031,7 +2031,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" + .format(cluster_conf_tmp) + ) + ac( +@@ -2042,7 +2042,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" ++ "cluster setup --force --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp" + .format(cluster_conf_tmp) + ) + ac( +@@ -2056,19 +2056,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + <cluster config_version="14" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + 
<altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -2094,7 +2094,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -2105,7 +2105,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" ++ "cluster setup --force --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -2119,19 +2119,19 @@ Warning: using a RRP mode of 'active' is not supported or tested + <cluster config_version="12" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> 
+ </fence> + </clusternode> +@@ -2154,17 +2154,17 @@ Warning: using a RRP mode of 'active' is not supported or tested + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2,192.168.99.3" + .format(cluster_conf_tmp) + ) + ac(output, """\ +-Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3 ++Error: You cannot specify more than two addresses for a node: rh7-2.localhost,192.168.99.2,192.168.99.3 + """) + self.assertEqual(returnVal, 1) + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --name cname rh7-1,192.168.99.1 rh7-2" ++ "cluster setup --local --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost" + ) + ac(output, """\ + Error: if one node is configured for RRP, all nodes must be configured for RRP +@@ -2173,7 +2173,7 @@ Error: if one node is configured for RRP, all nodes must be configured for RRP + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1 --transport=udpu" ++ "cluster setup --local --name test99 rh7-1.localhost rh7-2.localhost --addr0 1.1.1.1 --transport=udpu" + ) + ac(output, """\ + Error: --addr0 and --addr1 can only be used with --transport=udp +@@ -2183,7 +2183,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2" + .format(cluster_conf_tmp) + ) + ac(output, "") +@@ -2194,19 +2194,19 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft + <cluster config_version="12" name="cname"> + <fence_daemon/> + 
<clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <altname name="192.168.99.1"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <altname name="192.168.99.2"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -2231,19 +2231,19 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft + <cluster config_version="12" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <altname name="1.1.2.0"/> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -2262,7 +2262,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -2277,7 +2277,7 @@ Warning: Enabling broadcast for 
all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp" ++ "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -2294,7 +2294,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5" ++ "cluster setup --local --cluster_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -2310,17 +2310,17 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust + <cluster config_version="9" name="test99"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -2341,7 +2341,7 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust + return + + o,r = pcs( +- "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 
--token_coefficient 20005" ++ "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005" + .format(corosync_conf_tmp) + ) + ac(o,"") +@@ -2364,12 +2364,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -2392,7 +2392,7 @@ logging { + + output, returnVal = pcs( + temp_cib, +- "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005" ++ "cluster setup --local --cluster_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005" + .format(cluster_conf_tmp) + ) + ac(output, """\ +@@ -2405,17 +2405,17 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters + <cluster config_version="10" name="test99"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +@@ -2583,12 +2583,12 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters + return + + self.assert_pcs_fail( +- "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown", ++ "cluster setup --local --name 
cname rh7-1.localhost rh7-2.localhost --transport=unknown", + "Error: 'unknown' is not a valid transport value, use udp, udpu, use --force to override\n" + ) + + self.assert_pcs_success( +- "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown --force", ++ "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=unknown --force", + "Warning: 'unknown' is not a valid transport value, use udp, udpu\n" + ) + with open(corosync_conf_tmp) as f: +@@ -2603,12 +2603,12 @@ totem { + + nodelist { + node { +- ring0_addr: rh7-1 ++ ring0_addr: rh7-1.localhost + nodeid: 1 + } + + node { +- ring0_addr: rh7-2 ++ ring0_addr: rh7-2.localhost + nodeid: 2 + } + } +@@ -2630,12 +2630,12 @@ logging { + return + + self.assert_pcs_fail( +- "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma", ++ "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=rdma", + "Error: 'rdma' is not a valid transport value, use udp, udpu, use --force to override\n" + ) + + self.assert_pcs_success( +- "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma --force", ++ "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=rdma --force", + "Warning: 'rdma' is not a valid transport value, use udp, udpu\n" + ) + with open(cluster_conf_tmp) as f: +@@ -2644,17 +2644,17 @@ logging { + <cluster config_version="9" name="cname"> + <fence_daemon/> + <clusternodes> +- <clusternode name="rh7-1" nodeid="1"> ++ <clusternode name="rh7-1.localhost" nodeid="1"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-1"/> ++ <device name="pcmk-redirect" port="rh7-1.localhost"/> + </method> + </fence> + </clusternode> +- <clusternode name="rh7-2" nodeid="2"> ++ <clusternode name="rh7-2.localhost" nodeid="2"> + <fence> + <method name="pcmk-method"> +- <device name="pcmk-redirect" port="rh7-2"/> ++ <device name="pcmk-redirect" port="rh7-2.localhost"/> + </method> + </fence> + </clusternode> +-- 
+1.8.3.1 + diff --git a/SOURCES/bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch b/SOURCES/bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch new file mode 100644 index 0000000..73bcba3 --- /dev/null +++ b/SOURCES/bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch @@ -0,0 +1,47 @@ +From 35d19fd293a758c0696410a490daa349bfcc9e21 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Mon, 8 Aug 2016 15:05:51 +0200 +Subject: [PATCH] handle exceptions when waiting for response from user + +--- + pcs/utils.py | 22 ++++++++++++++++------ + 1 file changed, 16 insertions(+), 6 deletions(-) + +diff --git a/pcs/utils.py b/pcs/utils.py +index 8b2cf7c..53cc0b0 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1801,14 +1801,24 @@ def get_terminal_input(message=None): + if message: + sys.stdout.write(message) + sys.stdout.flush() +- if PYTHON2: +- return raw_input("") +- else: +- return input("") ++ try: ++ if PYTHON2: ++ return raw_input("") ++ else: ++ return input("") ++ except EOFError: ++ return "" ++ except KeyboardInterrupt: ++ print("Interrupted") ++ sys.exit(1) + + def get_terminal_password(message="Password: "): +- if sys.stdout.isatty(): +- return getpass.getpass(message) ++ if sys.stdin.isatty(): ++ try: ++ return getpass.getpass(message) ++ except KeyboardInterrupt: ++ print("Interrupted") ++ sys.exit(1) + else: + return get_terminal_input(message) + +-- +1.8.3.1 + diff --git a/SOURCES/bz1357945-01-add-support-for-clufter-s-dist-parameter.patch b/SOURCES/bz1357945-01-add-support-for-clufter-s-dist-parameter.patch new file mode 100644 index 0000000..a2beae5 --- /dev/null +++ b/SOURCES/bz1357945-01-add-support-for-clufter-s-dist-parameter.patch @@ -0,0 +1,253 @@ +From 4ffe5f795247be1f1a14100721bf09d54c904a94 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Wed, 20 Jul 2016 16:42:37 +0200 +Subject: [PATCH] add support for clufter's 'dist' parameter + +--- 
+ pcs/config.py | 69 ++++++++++++++++++++++++++++++++++++++++++++++++----------- + pcs/pcs.8 | 10 ++++----- + pcs/usage.py | 21 ++++++++++++------ + 3 files changed, 76 insertions(+), 24 deletions(-) + +diff --git a/pcs/config.py b/pcs/config.py +index 9119c3c..e410a5a 100644 +--- a/pcs/config.py ++++ b/pcs/config.py +@@ -18,8 +18,10 @@ import logging + import pwd + import grp + import time ++import platform + + try: ++ import clufter.facts + import clufter.format_manager + import clufter.filter_manager + import clufter.command_manager +@@ -555,6 +557,7 @@ def config_import_cman(argv): + cluster_conf = settings.cluster_conf_file + dry_run_output = None + output_format = "cluster.conf" if utils.is_rhel6() else "corosync.conf" ++ dist = None + invalid_args = False + for arg in argv: + if "=" in arg: +@@ -571,6 +574,8 @@ def config_import_cman(argv): + output_format = value + else: + invalid_args = True ++ elif name == "dist": ++ dist = value + else: + invalid_args = True + else: +@@ -588,12 +593,34 @@ def config_import_cman(argv): + force = "--force" in utils.pcs_options + interactive = "--interactive" in utils.pcs_options + ++ if dist is not None: ++ if output_format == "cluster.conf": ++ if not clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")): ++ utils.err("dist does not match output-format") ++ elif output_format == "corosync.conf": ++ if not clufter.facts.cluster_pcs_needle("linux", dist.split(",")): ++ utils.err("dist does not match output-format") ++ elif ( ++ (output_format == "cluster.conf" and utils.is_rhel6()) ++ or ++ (output_format == "corosync.conf" and not utils.is_rhel6()) ++ ): ++ dist = ",".join(platform.linux_distribution(full_distribution_name=0)) ++ elif output_format == "cluster.conf": ++ dist = "redhat,6.7,Santiago" ++ elif output_format == "corosync.conf": ++ dist = "redhat,7.1,Maipo" ++ else: ++ # for output-format=pcs-command[-verbose] ++ dist = ",".join(platform.linux_distribution(full_distribution_name=0)) ++ + clufter_args = { 
+ "input": str(cluster_conf), + "cib": {"passin": "bytestring"}, + "nocheck": force, + "batch": True, + "sys": "linux", ++ "dist": dist, + # Make it work on RHEL6 as well for sure + "color": "always" if sys.stdout.isatty() else "never" + } +@@ -606,11 +633,9 @@ def config_import_cman(argv): + logging.getLogger("clufter").setLevel(logging.DEBUG) + if output_format == "cluster.conf": + clufter_args["ccs_pcmk"] = {"passin": "bytestring"} +- clufter_args["dist"] = "redhat,6.7,Santiago" + cmd_name = "ccs2pcs-flatiron" + elif output_format == "corosync.conf": + clufter_args["coro"] = {"passin": "struct"} +- clufter_args["dist"] = "redhat,7.1,Maipo" + cmd_name = "ccs2pcs-needle" + elif output_format in ("pcs-commands", "pcs-commands-verbose"): + clufter_args["output"] = {"passin": "bytestring"} +@@ -624,7 +649,15 @@ def config_import_cman(argv): + clufter_args["text_width"] = "-1" + clufter_args["silent"] = False + clufter_args["noguidance"] = False +- cmd_name = "ccs2pcscmd-flatiron" ++ if clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")): ++ cmd_name = "ccs2pcscmd-flatiron" ++ elif clufter.facts.cluster_pcs_needle("linux", dist.split(",")): ++ cmd_name = "ccs2pcscmd-needle" ++ else: ++ utils.err( ++ "unrecognized dist, try something recognized" ++ + " (e. g. 
rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty)" ++ ) + clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args) + + # run convertor +@@ -737,29 +770,36 @@ def config_export_pcs_commands(argv, verbose=False): + interactive = "--interactive" in utils.pcs_options + invalid_args = False + output_file = None ++ dist = None + for arg in argv: + if "=" in arg: + name, value = arg.split("=", 1) + if name == "output": + output_file = value ++ elif name == "dist": ++ dist = value + else: + invalid_args = True + else: + invalid_args = True +- if invalid_args or not output_file: ++ # check options ++ if invalid_args: + usage.config(["export", "pcs-commands"]) + sys.exit(1) ++ # complete optional options ++ if dist is None: ++ dist = ",".join(platform.linux_distribution(full_distribution_name=0)) + + # prepare convertor options + clufter_args = { + "nocheck": force, + "batch": True, + "sys": "linux", ++ "dist": dist, + # Make it work on RHEL6 as well for sure + "color": "always" if sys.stdout.isatty() else "never", + "coro": settings.corosync_conf_file, + "ccs": settings.cluster_conf_file, +- "output": {"passin": "bytestring"}, + "start_wait": "60", + "tmp_cib": "tmp-cib.xml", + "force": force, +@@ -767,6 +807,10 @@ def config_export_pcs_commands(argv, verbose=False): + "silent": True, + "noguidance": True, + } ++ if output_file: ++ clufter_args["output"] = {"passin": "bytestring"} ++ else: ++ clufter_args["output"] = "-" + if interactive: + if "EDITOR" not in os.environ: + utils.err("$EDITOR environment variable is not set") +@@ -791,13 +835,14 @@ def config_export_pcs_commands(argv, verbose=False): + "Error: unable to export cluster configuration" + ) + +- # save commands +- ok, message = utils.write_file( +- output_file, +- clufter_args_obj.output["passout"] +- ) +- if not ok: +- utils.err(message) ++ # save commands if not printed to stdout by clufter ++ if output_file: ++ ok, message = utils.write_file( ++ output_file, ++ 
clufter_args_obj.output["passout"] ++ ) ++ if not ok: ++ utils.err(message) + + def run_clufter(cmd_name, cmd_args, debug, force, err_prefix): + try: +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index a26c94b..66ffb8c 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -624,14 +624,14 @@ Show specified configuration checkpoint. + checkpoint restore <checkpoint_number> + Restore cluster configuration to specified checkpoint. + .TP +-import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] +-Converts CMAN cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output-format option. ++import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>] ++Converts CMAN cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output-format option. Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf. 
+ .TP +-import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose ++import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>] + Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed. Commands will be saved to 'output' file. For other options see above. + .TP +-export pcs\-commands|pcs\-commands\-verbose output=<filename> +-Creates a list of pcs commands which upon execution recreates the current cluster running on this node. Commands will be saved to 'output' file. Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages. ++export pcs\-commands|pcs\-commands\-verbose output=<filename> [dist=<dist>] ++Creates a list of pcs commands which upon execution recreates the current cluster running on this node. Commands will be saved to 'output' file or written to stdout if 'output' is not specified. Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages. Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. If 'dist' is not specified, it defaults to this node's version. + .SS "pcsd" + .TP + certkey <certificate file> <key file> +diff --git a/pcs/usage.py b/pcs/usage.py +index 0605cd7..0474324 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1173,7 +1173,7 @@ Commands: + Restore cluster configuration to specified checkpoint. + + import-cman output=<filename> [input=<filename>] [--interactive] +- [output-format=corosync.conf|cluster.conf] ++ [output-format=corosync.conf|cluster.conf] [dist=<dist>] + Converts CMAN cluster configuration to Pacemaker cluster configuration. + Converted configuration will be saved to 'output' file. 
To send + the configuration to the cluster nodes the 'pcs config restore' +@@ -1181,20 +1181,27 @@ Commands: + prompted to solve incompatibilities manually. If no input is specified + /etc/cluster/cluster.conf will be used. You can force to create output + containing either cluster.conf or corosync.conf using the output-format +- option. ++ option. Optionally you can specify output version by setting 'dist' ++ option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. ++ If 'dist' is not specified, it defaults to this nodei's version if that ++ matches output-format, otherwise redhat,6.7 is used for cluster.conf ++ and redhat,7.1 is used for corosync.conf. + + import-cman output=<filename> [input=<filename>] [--interactive] +- output-format=pcs-commands|pcs-commands-verbose ++ output-format=pcs-commands|pcs-commands-verbose [dist=<dist>] + Converts CMAN cluster configuration to a list of pcs commands which + recreates the same cluster as Pacemaker cluster when executed. Commands + will be saved to 'output' file. For other options see above. + +- export pcs-commands|pcs-commands-verbose output=<filename> ++ export pcs-commands|pcs-commands-verbose [output=<filename>] [dist=<dist>] + Creates a list of pcs commands which upon execution recreates + the current cluster running on this node. Commands will be saved +- to 'output' file. Use pcs-commands to get a simple list of commands, +- whereas pcs-commands-verbose creates a list including comments and debug +- messages. ++ to 'output' file or written to stdout if 'output' is not specified. Use ++ pcs-commands to get a simple list of commands, whereas ++ pcs-commands-verbose creates a list including comments and debug ++ messages. Optionally specify output version by setting 'dist' option ++ e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. If 'dist' ++ is not specified, it defaults to this node's version. 
+ """ + if pout: + print(sub_usage(args, output)) +-- +1.8.3.1 + diff --git a/SOURCES/bz1357945-02-doc-fixes-regarding-clufter.patch b/SOURCES/bz1357945-02-doc-fixes-regarding-clufter.patch new file mode 100644 index 0000000..aad1e01 --- /dev/null +++ b/SOURCES/bz1357945-02-doc-fixes-regarding-clufter.patch @@ -0,0 +1,76 @@ +From 4d8b5102f6504b2faf8219e799244f37dde2db10 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Wed, 3 Aug 2016 14:55:31 +0200 +Subject: [PATCH] doc: fixes regarding clufter + +--- + pcs/pcs.8 | 8 ++++---- + pcs/usage.py | 15 +++++++++------ + 2 files changed, 13 insertions(+), 10 deletions(-) + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 0e8e967..09c0235 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -625,13 +625,13 @@ checkpoint restore <checkpoint_number> + Restore cluster configuration to specified checkpoint. + .TP + import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>] +-Converts CMAN cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output-format option. Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf. ++Converts CMAN cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. 
To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output-format option. Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command. If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf. + .TP + import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>] + Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed. Commands will be saved to 'output' file. For other options see above. + .TP +-export pcs\-commands|pcs\-commands\-verbose output=<filename> [dist=<dist>] +-Creates a list of pcs commands which upon execution recreates the current cluster running on this node. Commands will be saved to 'output' file or written to stdout if 'output' is not specified. Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages. Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. If 'dist' is not specified, it defaults to this node's version. ++export pcs\-commands|pcs\-commands\-verbose [output=<filename>] [dist=<dist>] ++Creates a list of pcs commands which upon execution recreates the current cluster running on this node. Commands will be saved to 'output' file or written to stdout if 'output' is not specified. 
Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages. Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command. If 'dist' is not specified, it defaults to this node's version. + .SS "pcsd" + .TP + certkey <certificate file> <key file> +@@ -644,7 +644,7 @@ clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR] + Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa. After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth'). Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance. + .SS "node" + .TP +-attribute [[<node>] [\fB\-\-name\fR <attr>] | <node> <name>=<value> ...] ++attribute [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...] + Manage node attributes. If no parameters are specified, show attributes of all nodes. If one parameter is specified, show attributes of specified node. If \fB\-\-name\fR is specified, show specified attribute's value from all nodes. If more parameters are specified, set attributes of specified node. Attributes can be removed by setting an attribute without a value. + .TP + maintenance [\fB\-\-all\fR] | [<node>]... +diff --git a/pcs/usage.py b/pcs/usage.py +index 7cfb33e..ef60b64 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -1182,10 +1182,11 @@ Commands: + /etc/cluster/cluster.conf will be used. You can force to create output + containing either cluster.conf or corosync.conf using the output-format + option. Optionally you can specify output version by setting 'dist' +- option e. g. 
rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. +- If 'dist' is not specified, it defaults to this nodei's version if that +- matches output-format, otherwise redhat,6.7 is used for cluster.conf +- and redhat,7.1 is used for corosync.conf. ++ option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You ++ can get the list of supported dist values by running the "clufter ++ --list-dists" command. If 'dist' is not specified, it defaults to this ++ node's version if that matches output-format, otherwise redhat,6.7 is ++ used for cluster.conf and redhat,7.1 is used for corosync.conf. + + import-cman output=<filename> [input=<filename>] [--interactive] + output-format=pcs-commands|pcs-commands-verbose [dist=<dist>] +@@ -1200,8 +1201,10 @@ Commands: + pcs-commands to get a simple list of commands, whereas + pcs-commands-verbose creates a list including comments and debug + messages. Optionally specify output version by setting 'dist' option +- e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. If 'dist' +- is not specified, it defaults to this node's version. ++ e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get ++ the list of supported dist values by running the "clufter --list-dists" ++ command. If 'dist' is not specified, it defaults to this node's ++ version. 
+ """ + if pout: + print(sub_usage(args, output)) +-- +1.8.3.1 + diff --git a/SOURCES/bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch b/SOURCES/bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch new file mode 100644 index 0000000..7acb761 --- /dev/null +++ b/SOURCES/bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch @@ -0,0 +1,136 @@ +From 54e03344d1d10b66bb0aad92bf072c283ec07185 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Tue, 26 Jul 2016 13:44:09 +0200 +Subject: [PATCH] fix exceptions when authenticating cluster nodes + +--- + pcsd/pcs.rb | 70 ++++++++++++++++++++++++++++++------------------------------ + pcsd/pcsd.rb | 18 ++++++++++++++-- + 2 files changed, 51 insertions(+), 37 deletions(-) + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 0956de9..ad54a75 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -395,47 +395,47 @@ end + + def send_request(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, cookies_data=nil) + cookies_data = {} if not cookies_data +- begin +- request = "/#{request}" if not request.start_with?("/") ++ request = "/#{request}" if not request.start_with?("/") + +- # fix ipv6 address for URI.parse +- node6 = node +- if (node.include?(":") and ! node.start_with?("[")) +- node6 = "[#{node}]" +- end ++ # fix ipv6 address for URI.parse ++ node6 = node ++ if (node.include?(":") and ! node.start_with?("[")) ++ node6 = "[#{node}]" ++ end + +- if remote +- uri = URI.parse("https://#{node6}:2224/remote" + request) +- else +- uri = URI.parse("https://#{node6}:2224" + request) +- end ++ if remote ++ uri = URI.parse("https://#{node6}:2224/remote" + request) ++ else ++ uri = URI.parse("https://#{node6}:2224" + request) ++ end + +- if post +- req = Net::HTTP::Post.new(uri.path) +- raw_data ? 
req.body = raw_data : req.set_form_data(data) +- else +- req = Net::HTTP::Get.new(uri.path) +- req.set_form_data(data) +- end ++ if post ++ req = Net::HTTP::Post.new(uri.path) ++ raw_data ? req.body = raw_data : req.set_form_data(data) ++ else ++ req = Net::HTTP::Get.new(uri.path) ++ req.set_form_data(data) ++ end + +- cookies_to_send = [] +- cookies_data_default = {} +- # Let's be safe about characters in cookie variables and do base64. +- # We cannot do it for CIB_user however to be backward compatible +- # so we at least remove disallowed characters. +- cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe( +- auth_user[:username].to_s +- ) +- cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode( +- (auth_user[:usergroups] || []).join(' ') +- ) ++ cookies_to_send = [] ++ cookies_data_default = {} ++ # Let's be safe about characters in cookie variables and do base64. ++ # We cannot do it for CIB_user however to be backward compatible ++ # so we at least remove disallowed characters. 
++ cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe( ++ auth_user[:username].to_s ++ ) ++ cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode( ++ (auth_user[:usergroups] || []).join(' ') ++ ) + +- cookies_data_default.update(cookies_data) +- cookies_data_default.each { |name, value| +- cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s +- } +- req.add_field('Cookie', cookies_to_send.join(';')) ++ cookies_data_default.update(cookies_data) ++ cookies_data_default.each { |name, value| ++ cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s ++ } ++ req.add_field('Cookie', cookies_to_send.join(';')) + ++ begin + # uri.host returns "[addr]" for ipv6 addresses, which is wrong + # uri.hostname returns "addr" for ipv6 addresses, which is correct, but it + # is not available in older ruby versions +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index d3032cf..287cf03 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -75,6 +75,7 @@ if development? + end + + before do ++ # nobody is logged in yet + @auth_user = nil + + # get session storage instance from env +@@ -83,8 +84,21 @@ before do + $session_storage_env = env + end + +- if request.path != '/login' and not request.path == "/logout" and not request.path == '/remote/auth' and not request.path == '/login-status' +- protected! ++ # urls which are accesible for everybody including not logged in users ++ always_accessible = [ ++ '/login', ++ '/logout', ++ '/login-status', ++ '/remote/auth', ++ ] ++ if not always_accessible.include?(request.path) ++ # Sets @auth_user to a hash containing info about logged in user or halts ++ # the request processing if login credentials are incorrect. ++ protected! 
++ else ++ # Set a sane default: nobody is logged in, but we do not need to check both ++ # for nil and empty username (if auth_user and auth_user[:username]) ++ @auth_user = {} if not @auth_user + end + $cluster_name = get_cluster_name() + end +-- +1.8.3.1 + diff --git a/SOURCES/bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch b/SOURCES/bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch new file mode 100644 index 0000000..5ef6de1 --- /dev/null +++ b/SOURCES/bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch @@ -0,0 +1,74 @@ +From 25413c28853e1d350982feba7e306e05b6e74f49 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular <omular@redhat.com> +Date: Sun, 14 Aug 2016 13:38:52 +0200 +Subject: [PATCH] web UI: fix bad using of HTML ids + +--- + pcsd/public/js/nodes-ember.js | 5 +++++ + pcsd/public/js/pcsd.js | 11 ++++++++--- + pcsd/views/main.erb | 4 ++-- + 3 files changed, 15 insertions(+), 5 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index c51a341..f176c39 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -823,6 +823,11 @@ Pcs.ResourceObj = Ember.Object.extend({ + id: null, + _id: Ember.computed.alias('id'), + name: Ember.computed.alias('id'), ++ treeview_element_id: function() { ++ if (this.get("id")) { ++ return this.get("id") + "-treeview-element"; ++ } ++ }.property("id"), + parent: null, + meta_attr: [], + meta_attributes: Ember.computed.alias('meta_attr'), +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 56219d4..1060bd3 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -2077,8 +2077,13 @@ function fix_auth_of_cluster() { + }); + } + +-function get_tree_view_element_id(element) { +- return $(element).parents('table.tree-element')[0].id; ++function get_tree_view_resource_id(element) { ++ var suffix = '-treeview-element'; ++ var element_id = $(element).parents('table.tree-element')[0].id; ++ if (element_id && element_id.endsWith(suffix)) { ++ 
return element_id.substr(0, element_id.lastIndexOf(suffix)); ++ } ++ return null; + } + + function get_list_view_element_id(element) { +@@ -2166,7 +2171,7 @@ function tree_view_onclick(resource_id) { + } + + function tree_view_select(element_id) { +- var e = $('#' + element_id); ++ var e = $(`#${element_id}-treeview-element`); + var view = e.parents('table.tree-view'); + view.find('div.arrow').hide(); + view.find('tr.children').hide(); +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 64fe560..8de1c60 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -99,8 +99,8 @@ + </script> + + <script type="text/x-handlebars" data-template-name="components/resource-tree-element"> +- <table class="tree-element" cellpadding="0" cellspacing="0" style="width: 100%; border: none;" {{bind-attr id=node._id}}> +- <tr class="tree-element-name" onclick="tree_view_onclick(get_tree_view_element_id(this));" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" {{bind-attr nodeID=node.id}}> ++ <table class="tree-element" cellpadding="0" cellspacing="0" style="width: 100%; border: none;" {{bind-attr id=node.treeview_element_id}}> ++ <tr class="tree-element-name" onclick="tree_view_onclick(get_tree_view_resource_id(this));" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" {{bind-attr nodeID=node.id}}> + <td style="width:20px;" class="node_list_check"> + <input type="checkbox" onchange="tree_view_checkbox_onchange(this)"> + </td> +-- +1.8.3.1 + diff --git a/SOURCES/change-cman-to-rhel6-in-messages.patch b/SOURCES/change-cman-to-rhel6-in-messages.patch new file mode 100644 index 0000000..c7b0d88 --- /dev/null +++ b/SOURCES/change-cman-to-rhel6-in-messages.patch @@ -0,0 +1,271 @@ +From 0c8d98bb420b5ea366de361758c6c01851f94630 Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Mon, 23 May 2016 17:00:13 +0200 +Subject: [PATCH] change cman to rhel6 in messages + 
+--- + pcs/cluster.py | 6 +++--- + pcs/lib/reports.py | 8 ++++---- + pcs/pcs.8 | 10 +++++----- + pcs/usage.py | 20 ++++++++++---------- + pcsd/views/_dialogs.erb | 2 +- + pcsd/views/manage.erb | 16 ++++++++-------- + pcsd/views/nodes.erb | 2 +- + 7 files changed, 32 insertions(+), 32 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index e5ad1ec..4572643 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -1553,7 +1553,7 @@ def cluster_node(argv): + else: + utils.err("Unable to update any nodes") + if utils.is_cman_with_udpu_transport(): +- print("Warning: Using udpu transport on a CMAN cluster, " ++ print("Warning: Using udpu transport on a RHEL 6 cluster, " + + "cluster restart is required to apply node addition") + if wait: + print() +@@ -1628,7 +1628,7 @@ def cluster_node(argv): + output, retval = utils.reloadCorosync() + output, retval = utils.run(["crm_node", "--force", "-R", node0]) + if utils.is_cman_with_udpu_transport(): +- print("Warning: Using udpu transport on a CMAN cluster, " ++ print("Warning: Using udpu transport on a RHEL 6 cluster, " + + "cluster restart is required to apply node removal") + + def cluster_localnode(argv): +@@ -1796,7 +1796,7 @@ def cluster_uidgid(argv, silent_list = False): + + def cluster_get_corosync_conf(argv): + if utils.is_rhel6(): +- utils.err("corosync.conf is not supported on CMAN clusters") ++ utils.err("corosync.conf is not supported on RHEL 6 clusters") + + if len(argv) > 1: + usage.cluster() +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index cff491c..89888c5 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -893,7 +893,7 @@ def cman_unsupported_command(): + """ + return ReportItem.error( + report_codes.CMAN_UNSUPPORTED_COMMAND, +- "This command is not supported on CMAN clusters" ++ "This command is not supported on RHEL 6 clusters" + ) + + def id_already_exists(id): +@@ -1138,7 +1138,7 @@ def cman_ignored_option(option): + """ + return ReportItem.warning( + 
report_codes.IGNORED_CMAN_UNSUPPORTED_OPTION, +- '{option_name} ignored as it is not supported on CMAN clusters', ++ '{option_name} ignored as it is not supported on RHEL 6 clusters', + info={'option_name': option} + ) + +@@ -1159,7 +1159,7 @@ def cman_udpu_restart_required(): + """ + return ReportItem.warning( + report_codes.CMAN_UDPU_RESTART_REQUIRED, +- "Using udpu transport on a CMAN cluster, " ++ "Using udpu transport on a RHEL 6 cluster, " + + "cluster restart is required after node add or remove" + ) + +@@ -1169,7 +1169,7 @@ def cman_broadcast_all_rings(): + """ + return ReportItem.warning( + report_codes.CMAN_BROADCAST_ALL_RINGS, +- "Enabling broadcast for all rings as CMAN does not support " ++ "Enabling broadcast for all rings as RHEL 6 does not support " + + "broadcast in only one ring" + ) + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 1efe8f4..dffaddd 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -197,13 +197,13 @@ auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\ + Authenticate pcs to pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re-authentication to occur. + .TP + setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [<node2[,node2-altaddr]>] [...] 
[\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]] [\fB\-\-wait_for_all\fR=<0|1>] [\fB\-\-auto_tie_breaker\fR=<0|1>] [\fB\-\-last_man_standing\fR=<0|1> [\fB\-\-last_man_standing_window\fR=<time in ms>]] [\fB\-\-ipv6\fR] [\fB\-\-token\fR <timeout>] [\fB\-\-token_coefficient\fR <timeout>] [\fB\-\-join\fR <timeout>] [\fB\-\-consensus\fR <timeout>] [\fB\-\-miss_count_const\fR <count>] [\fB\-\-fail_recv_const\fR <failures>] +-Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active' is not recommended). The \fB\-\-wait_for_all\fR, \fB\-\-auto_tie_breaker\fR, \fB\-\-last_man_standing\fR, \fB\-\-last_man_standing_window\fR options are all documented in corosync's votequorum(5) man page. These options are not supported on CMAN clusters. ++Configure corosync and sync configuration out to listed nodes. 
\fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for RHEL 6 clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active' is not recommended). The \fB\-\-wait_for_all\fR, \fB\-\-auto_tie_breaker\fR, \fB\-\-last_man_standing\fR, \fB\-\-last_man_standing_window\fR options are all documented in corosync's votequorum(5) man page. These options are not supported on RHEL 6 clusters. + +-\fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4). This option is not supported on CMAN clusters. ++\fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4). This option is not supported on RHEL 6 clusters. + + \fB\-\-token\fR <timeout> sets time in milliseconds until a token loss is declared after not receiving a token (default 1000 ms) + +-\fB\-\-token_coefficient\fR <timeout> sets time in milliseconds used for clusters with at least 3 nodes as a coefficient for real token timeout calculation (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms) This option is not supported on CMAN clusters. ++\fB\-\-token_coefficient\fR <timeout> sets time in milliseconds used for clusters with at least 3 nodes as a coefficient for real token timeout calculation (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms) This option is not supported on RHEL 6 clusters. + + \fB\-\-join\fR <timeout> sets time in milliseconds to wait for join messages (default 50 ms) + +@@ -684,10 +684,10 @@ checkpoint restore <checkpoint_number> + Restore cluster configuration to specified checkpoint. 
+ .TP + import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>] +-Converts CMAN cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output-format option. Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command. If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf. ++Converts RHEL 6 (CMAN) cluster configuration to Pacemaker cluster configuration. Converted configuration will be saved to 'output' file. To send the configuration to the cluster nodes the 'pcs config restore' command can be used. If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually. If no input is specified /etc/cluster/cluster.conf will be used. You can force to create output containing either cluster.conf or corosync.conf using the output-format option. Optionally you can specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command. If 'dist' is not specified, it defaults to this node's version if that matches output-format, otherwise redhat,6.7 is used for cluster.conf and redhat,7.1 is used for corosync.conf. 
+ .TP + import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>] +-Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed. Commands will be saved to 'output' file. For other options see above. ++Converts RHEL 6 (CMAN) cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed. Commands will be saved to 'output' file. For other options see above. + .TP + export pcs\-commands|pcs\-commands\-verbose [output=<filename>] [dist=<dist>] + Creates a list of pcs commands which upon execution recreates the current cluster running on this node. Commands will be saved to 'output' file or written to stdout if 'output' is not specified. Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages. Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty. You can get the list of supported dist values by running the "clufter \fB\-\-list-dists\fR" command. If 'dist' is not specified, it defaults to this node's version. +diff --git a/pcs/usage.py b/pcs/usage.py +index ea407c3..0ebebe0 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -553,22 +553,22 @@ Commands: + --wait will wait up to 'n' seconds for the nodes to start, + --enable will enable corosync and pacemaker on node startup, + --transport allows specification of corosync transport (default: udpu; +- udp for CMAN clusters), ++ udp for RHEL 6 clusters), + --rrpmode allows you to set the RRP mode of the system. Currently only + 'passive' is supported or tested (using 'active' is not + recommended). + The --wait_for_all, --auto_tie_breaker, --last_man_standing, + --last_man_standing_window options are all documented in corosync's +- votequorum(5) man page. 
These options are not supported on CMAN ++ votequorum(5) man page. These options are not supported on RHEL 6 + clusters. + --ipv6 will configure corosync to use ipv6 (instead of ipv4). This +- option is not supported on CMAN clusters. ++ option is not supported on RHEL 6 clusters. + --token <timeout> sets time in milliseconds until a token loss is + declared after not receiving a token (default 1000 ms) + --token_coefficient <timeout> sets time in milliseconds used for clusters + with at least 3 nodes as a coefficient for real token timeout calculation + (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms) +- This option is not supported on CMAN clusters. ++ This option is not supported on RHEL 6 clusters. + --join <timeout> sets time in milliseconds to wait for join messages + (default 50 ms) + --consensus <timeout> sets time in milliseconds to wait for consensus +@@ -1187,9 +1187,9 @@ Commands: + + import-cman output=<filename> [input=<filename>] [--interactive] + [output-format=corosync.conf|cluster.conf] [dist=<dist>] +- Converts CMAN cluster configuration to Pacemaker cluster configuration. +- Converted configuration will be saved to 'output' file. To send +- the configuration to the cluster nodes the 'pcs config restore' ++ Converts RHEL 6 (CMAN) cluster configuration to Pacemaker cluster ++ configuration. Converted configuration will be saved to 'output' file. ++ To send the configuration to the cluster nodes the 'pcs config restore' + command can be used. If --interactive is specified you will be + prompted to solve incompatibilities manually. If no input is specified + /etc/cluster/cluster.conf will be used. You can force to create output +@@ -1203,9 +1203,9 @@ Commands: + + import-cman output=<filename> [input=<filename>] [--interactive] + output-format=pcs-commands|pcs-commands-verbose [dist=<dist>] +- Converts CMAN cluster configuration to a list of pcs commands which +- recreates the same cluster as Pacemaker cluster when executed. 
Commands +- will be saved to 'output' file. For other options see above. ++ Converts RHEL 6 (CMAN) cluster configuration to a list of pcs commands ++ which recreates the same cluster as Pacemaker cluster when executed. ++ Commands will be saved to 'output' file. For other options see above. + + export pcs-commands|pcs-commands-verbose [output=<filename>] [dist=<dist>] + Creates a list of pcs commands which upon execution recreates +diff --git a/pcsd/views/_dialogs.erb b/pcsd/views/_dialogs.erb +index d18ac71..21be443 100644 +--- a/pcsd/views/_dialogs.erb ++++ b/pcsd/views/_dialogs.erb +@@ -40,7 +40,7 @@ + <p style="font-size:12px;">Are you sure you want to remove the following node(s)?</p> + <span class="name_list"></span> + {{#if Pcs.is_cman_with_udpu_transport}} +- <p style="color: orange">This is a CMAN cluster with UDPU transport, cluster restart is required to apply node removal.</p> ++ <p style="color: orange">This is a RHEL 6 cluster with UDPU transport, cluster restart is required to apply node removal.</p> + {{/if}} + </div> + +diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb +index 39ab41f..cacd6cb 100644 +--- a/pcsd/views/manage.erb ++++ b/pcsd/views/manage.erb +@@ -213,9 +213,9 @@ + <tr><td align=center style="color: red" colspan=2"><span id="at_least_one_node_error_msg" style="display:none;">At least one valid node must be entered.</span></td></tr> + <tr><td align=center style="color: red" colspan=2"><span id="bad_cluster_name_error_msg" style="display:none;">You may not leave the cluster name field blank</span></td></tr> + <tr><td align=center style="color: red" colspan=2"><span id="addr0_addr1_mismatch_error_msg" style="display:none;">Ring 1 addresses do not match to Ring 0 addresses</span></td></tr> +- <tr><td align=center style="color: red" colspan=2"><span id="cman_udpu_transport_error_msg" style="display:none;">Cannot use UDPU transport on CMAN cluster. 
Please select UDP transport in Advanced options.</span></td></tr> ++      <tr><td align=center style="color: red" colspan=2"><span id="cman_udpu_transport_error_msg" style="display:none;">Cannot use UDPU transport on RHEL 6 cluster. Please select UDP transport in Advanced options.</span></td></tr> +       <tr><td align=center style="color: red" colspan=2"><span id="rhel_version_mismatch_error_msg" style="display:none;">Cannot create a cluster using nodes running different versions of RHEL.</span></td></tr> +-      <tr><td align=center style="color: red" colspan=2"><span id="cman_mismatch_error_msg" style="display:none;">Cannot mix nodes running CMAN and nodes without CMAN in one cluster.</span></td></tr> ++      <tr><td align=center style="color: red" colspan=2"><span id="cman_mismatch_error_msg" style="display:none;">Cannot mix nodes running RHEL 6 and nodes not running RHEL 6 in one cluster.</span></td></tr> +  </table> + <span onclick='$(".advanced_open").toggle();$("#advanced_cluster_create_options").toggle();'><span class="advanced_open rightarrow sprites"></span><span class="advanced_open downarrow sprites" style="display:none;"></span>Advanced Options:</span> + <div id="advanced_cluster_create_options" style="display:none;"> +@@ -245,7 +245,7 @@ remaining 3 nodes will be fenced. + + It is very useful when combined with Last Man Standing. + +-This option has no effect on CMAN cluster." %> ++This option has no effect on RHEL 6 cluster." %> + <% auto_tie_desc = "\ + Enables Auto Tie Breaker (ATB) feature (default: off). + +@@ -258,7 +258,7 @@ partition, or the set of nodes that are still in contact with the \ + node that has the lowest nodeid will remain quorate. The other nodes \ + will be inquorate. + +-This option has no effect on CMAN cluster." %> ++This option has no effect on RHEL 6 cluster." %> + <% last_man_desc = "\ + Enables Last Man Standing (LMS) feature (default: off). 
+ +@@ -279,18 +279,18 @@ Using the above 8 node cluster example, with LMS enabled the cluster \ + can retain quorum and continue operating by losing, in a cascade \ + fashion, up to 6 nodes with only 2 remaining active. + +-This option has no effect on CMAN cluster." %> ++This option has no effect on RHEL 6 cluster." %> + <% last_man_window_desc = "\ + Tunes Last Man Standing Window (default: 10000 ms) + + The window of time between when a node (or group of nodes die) and quorum \ + is recalculated if the 'Last Man Standing' option is enabled. + +-This option has no effect on CMAN cluster." %> ++This option has no effect on RHEL 6 cluster." %> + <% use_ipv6_desc = "\ + Enables IPv6 for cluster communication (default: off) + +-This option has no effect on CMAN cluster." %> ++This option has no effect on RHEL 6 cluster." %> + <% token_timeout = "\ + Sets time in milliseconds until a token loss is declared after not receiving \ + a token (default: 1000 ms)" %> +@@ -299,7 +299,7 @@ Sets time in milliseconds used for clusters with at least 3 nodes \ + as a coefficient for real token timeout calculation \ + (token + (number_of_nodes - 2) * token_coefficient) (default: 650 ms) + +-This option has no effect on CMAN cluster." %> ++This option has no effect on RHEL 6 cluster." 
%> + <% join_timeout = "\ + Sets time in milliseconds to wait for join messages (default: 50 ms)" %> + <% consensus_timeout = "\ +diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb +index 8fccd25..517e19d 100644 +--- a/pcsd/views/nodes.erb ++++ b/pcsd/views/nodes.erb +@@ -350,7 +350,7 @@ + {{/if}} + {{#if Pcs.is_cman_with_udpu_transport}} + <tr> +- <td colspan="2" style="color: orange">This is a CMAN cluster with UDPU transport,<br>cluster restart is required to apply node addition.</td> ++ <td colspan="2" style="color: orange">This is a RHEL 6 cluster with UDPU transport,<br>cluster restart is required to apply node addition.</td> + </tr> + {{/if}} + </table> +-- +1.8.3.1 + diff --git a/SOURCES/fix-pcs-constraint-ticket-set-help.patch b/SOURCES/fix-pcs-constraint-ticket-set-help.patch new file mode 100644 index 0000000..7d73ab0 --- /dev/null +++ b/SOURCES/fix-pcs-constraint-ticket-set-help.patch @@ -0,0 +1,41 @@ +From 10ed9eaf5f9485b0186fdc1546e22bb321d47e85 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Thu, 23 Jun 2016 13:31:29 +0200 +Subject: [PATCH] fix "pcs constraint ticket set" help + +--- + pcs/pcs.8 | 2 +- + pcs/usage.py | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 38a4913..0e230b7 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -482,7 +482,7 @@ ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id] + Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped. + .TP + ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]] +-Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket. 
Optional constraint option is loss-policy=fence/stop/freeze/demote. ++Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote. + .TP + remove [constraint id]... + Remove constraint(s) or constraint rules with the specified id(s). +diff --git a/pcs/usage.py b/pcs/usage.py +index 9d24b78..c4c417a 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -998,8 +998,8 @@ Commands: + Create a ticket constraint with a resource set. + Available options are sequential=true/false, require-all=true/false, + action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. +- Required constraint option is ticket. +- Optional constraint option is loss-policy=fence/stop/freeze/demote. ++ Required constraint option is ticket=<ticket>. Optional constraint ++ options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote. + + remove [constraint id]... + Remove constraint(s) or constraint rules with the specified id(s). 
+-- +1.8.3.1 + diff --git a/SOURCES/fix-qdevice-tests-failing-due-to-multithreading.patch b/SOURCES/fix-qdevice-tests-failing-due-to-multithreading.patch new file mode 100644 index 0000000..a0a7a6a --- /dev/null +++ b/SOURCES/fix-qdevice-tests-failing-due-to-multithreading.patch @@ -0,0 +1,151 @@ +From c178935290a5387fdfbd54330769ee159d8916b6 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Fri, 1 Jul 2016 10:39:48 +0200 +Subject: [PATCH] test: fix qdevice tests failing due to multithreading + +--- + pcs/test/test_lib_commands_quorum.py | 37 ++++++++++++++++++++++-------------- + 1 file changed, 23 insertions(+), 14 deletions(-) + +diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py +index c12ab66..826251a 100644 +--- a/pcs/test/test_lib_commands_quorum.py ++++ b/pcs/test/test_lib_commands_quorum.py +@@ -763,7 +763,7 @@ class AddDeviceNetTest(TestCase): + len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + mock_get_cert_request.assert_called_once_with( + "mock_runner", + self.cluster_name +@@ -786,7 +786,7 @@ class AddDeviceNetTest(TestCase): + len(client_import_calls), + len(mock_import_cert.mock_calls) + ) +- mock_import_cert.assert_has_calls(client_import_calls) ++ mock_import_cert.assert_has_calls(client_import_calls, any_order=True) + + def test_error_get_ca_cert( + self, mock_get_ca, mock_client_setup, mock_get_cert_request, +@@ -894,7 +894,7 @@ class AddDeviceNetTest(TestCase): + len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + + def test_error_client_setup_skip_offline( + self, mock_get_ca, mock_client_setup, mock_get_cert_request, +@@ -959,7 +959,7 @@ class AddDeviceNetTest(TestCase): + 
len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + + def test_generate_cert_request_error( + self, mock_get_ca, mock_client_setup, mock_get_cert_request, +@@ -1004,7 +1004,7 @@ class AddDeviceNetTest(TestCase): + len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + mock_get_cert_request.assert_called_once_with( + "mock_runner", + self.cluster_name +@@ -1062,7 +1062,7 @@ class AddDeviceNetTest(TestCase): + len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + mock_get_cert_request.assert_called_once_with( + "mock_runner", + self.cluster_name +@@ -1119,7 +1119,7 @@ class AddDeviceNetTest(TestCase): + len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + mock_get_cert_request.assert_called_once_with( + "mock_runner", + self.cluster_name +@@ -1201,7 +1201,7 @@ class AddDeviceNetTest(TestCase): + len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + mock_get_cert_request.assert_called_once_with( + "mock_runner", + self.cluster_name +@@ -1224,7 +1224,7 @@ class AddDeviceNetTest(TestCase): + len(client_import_calls), + len(mock_import_cert.mock_calls) + ) +- mock_import_cert.assert_has_calls(client_import_calls) ++ mock_import_cert.assert_has_calls(client_import_calls, any_order=True) + + def test_client_import_cert_error_skip_offline( + self, mock_get_ca, 
mock_client_setup, mock_get_cert_request, +@@ -1282,7 +1282,7 @@ class AddDeviceNetTest(TestCase): + len(client_setup_calls), + len(mock_client_setup.mock_calls) + ) +- mock_client_setup.assert_has_calls(client_setup_calls) ++ mock_client_setup.assert_has_calls(client_setup_calls, any_order=True) + mock_get_cert_request.assert_called_once_with( + "mock_runner", + self.cluster_name +@@ -1305,7 +1305,7 @@ class AddDeviceNetTest(TestCase): + len(client_import_calls), + len(mock_import_cert.mock_calls) + ) +- mock_import_cert.assert_has_calls(client_import_calls) ++ mock_import_cert.assert_has_calls(client_import_calls, any_order=True) + + + @mock.patch.object(LibraryEnvironment, "push_corosync_conf") +@@ -1507,7 +1507,10 @@ class RemoveDeviceNetTest(TestCase): + len(client_destroy_calls), + len(mock_client_destroy.mock_calls) + ) +- mock_client_destroy.assert_has_calls(client_destroy_calls) ++ mock_client_destroy.assert_has_calls( ++ client_destroy_calls, ++ any_order=True ++ ) + + def test_error_client_destroy(self, mock_client_destroy): + def raiser(communicator, node): +@@ -1561,7 +1564,10 @@ class RemoveDeviceNetTest(TestCase): + len(client_destroy_calls), + len(mock_client_destroy.mock_calls) + ) +- mock_client_destroy.assert_has_calls(client_destroy_calls) ++ mock_client_destroy.assert_has_calls( ++ client_destroy_calls, ++ any_order=True ++ ) + + def test_error_client_destroy_skip_offline(self, mock_client_destroy): + def raiser(communicator, node): +@@ -1606,7 +1612,10 @@ class RemoveDeviceNetTest(TestCase): + len(client_destroy_calls), + len(mock_client_destroy.mock_calls) + ) +- mock_client_destroy.assert_has_calls(client_destroy_calls) ++ mock_client_destroy.assert_has_calls( ++ client_destroy_calls, ++ any_order=True ++ ) + + + @mock.patch.object(LibraryEnvironment, "push_corosync_conf") +-- +1.8.3.1 + diff --git a/SOURCES/pcsd-bundle-config-1 b/SOURCES/pcsd-bundle-config-1 new file mode 100644 index 0000000..c067a62 --- /dev/null +++ 
b/SOURCES/pcsd-bundle-config-1 @@ -0,0 +1,5 @@ +--- +BUNDLE_FROZEN: '1' +BUNDLE_PATH: vendor/bundle +BUNDLE_DISABLE_SHARED_GEMS: '1' +BUNDLE_BUILD: --with-ldflags="-Wl,-z,now -Wl,-z,relro" diff --git a/SOURCES/pcsd-fix-syntax-error-on-ruby-1.8.patch b/SOURCES/pcsd-fix-syntax-error-on-ruby-1.8.patch new file mode 100644 index 0000000..da88ed6 --- /dev/null +++ b/SOURCES/pcsd-fix-syntax-error-on-ruby-1.8.patch @@ -0,0 +1,25 @@ +From 96885f66dde45fd8edf2b916ce63bbc236cfe28a Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Mon, 18 Jul 2016 15:03:42 +0200 +Subject: [PATCH] pcsd: fix syntax error on ruby 1.8 + +--- + pcsd/remote.rb | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 6a3a692..25fb74d 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1558,7 +1558,7 @@ def remove_resource(params, request, auth_user) + end + cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable'] + resource_list.each { |resource| +- _, err, retval = run_cmd(user, *cmd, resource) ++ _, err, retval = run_cmd(user, *(cmd + [resource])) + if retval != 0 + unless ( + err.join('').index('unable to find a resource') != -1 and +-- +1.8.3.1 + diff --git a/SOURCES/rhel7.patch b/SOURCES/rhel7.patch new file mode 100644 index 0000000..5f73344 --- /dev/null +++ b/SOURCES/rhel7.patch @@ -0,0 +1,80 @@ +From e27fb389233d1f66dcda32fa7d06192a82f5944f Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Tue, 24 May 2016 07:26:15 +0200 +Subject: [PATCH] adapt working with gems to rhel 7 + +--- + pcsd/Gemfile | 7 +------ + pcsd/Gemfile.lock | 3 --- + pcsd/Makefile | 5 +---- + 3 files changed, 2 insertions(+), 13 deletions(-) + +diff --git a/pcsd/Gemfile b/pcsd/Gemfile +index e851eaf..6526f53 100644 +--- a/pcsd/Gemfile ++++ b/pcsd/Gemfile +@@ -1,9 +1,5 @@ + source 'https://rubygems.org' + +-source 'https://tojeline.fedorapeople.org/rubygems/' do +- gem 'rpam-ruby19-feist', :platform => :ruby_18 +-end +- + 
gem 'sinatra' + gem 'sinatra-contrib' + gem 'rack' +@@ -12,8 +8,7 @@ gem 'tilt' + gem 'eventmachine' + gem 'rack-test' + gem 'backports' +-gem 'rpam-ruby19', :platform => [:ruby_19, :ruby_20, :ruby_21, :ruby_22] +-gem 'json' ++gem 'rpam-ruby19' + gem 'multi_json' + gem 'open4' + gem 'orderedhash' +diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock +index eff055a..15ee9b3 100644 +--- a/pcsd/Gemfile.lock ++++ b/pcsd/Gemfile.lock +@@ -1,10 +1,8 @@ + GEM + remote: https://rubygems.org/ +- remote: https://tojeline.fedorapeople.org/rubygems/ + specs: + backports (3.6.8) + eventmachine (1.2.0.1) +- json (1.8.3) + multi_json (1.12.0) + open4 (1.3.4) + orderedhash (0.0.6) +@@ -33,7 +31,6 @@ PLATFORMS + DEPENDENCIES + backports + eventmachine +- json + multi_json + open4 + orderedhash +diff --git a/pcsd/Makefile b/pcsd/Makefile +index 798a8bd..08d9cf1 100644 +--- a/pcsd/Makefile ++++ b/pcsd/Makefile +@@ -1,4 +1,4 @@ +-build_gems: get_gems ++build_gems: + bundle install --local --deployment + + # RHEL6 needs special rpam-ruby19 gem to work with 1.8.7 +@@ -21,8 +21,5 @@ build_gems_rhel6: + vendor/cache/sinatra-contrib-1.4.7.gem \ + -- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"' + +-get_gems: +- bundle package +- + clean: + rm -rfv vendor/ +-- +1.8.3.1 + diff --git a/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch b/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch new file mode 100644 index 0000000..59e9748 --- /dev/null +++ b/SOURCES/show-only-warning-when-crm_mon-xml-is-invalid.patch @@ -0,0 +1,86 @@ +From 7e4c0a0ea1cadf2c887994afa1e0f728ce64c1aa Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Mon, 29 Aug 2016 18:16:41 +0200 +Subject: [PATCH] show only warning when crm_mon xml is invalid + +--- + pcs/lib/pacemaker_state.py | 13 ++++++++++--- + pcs/test/test_lib_pacemaker_state.py | 24 ++++++++++++++++++++---- + 2 files changed, 30 insertions(+), 7 deletions(-) + +diff --git a/pcs/lib/pacemaker_state.py b/pcs/lib/pacemaker_state.py 
+index b413b90..e300da7 100644 +--- a/pcs/lib/pacemaker_state.py ++++ b/pcs/lib/pacemaker_state.py +@@ -136,10 +136,17 @@ class _NodeSection(_Element): + def _get_valid_cluster_state_dom(xml): + try: + dom = etree.fromstring(xml) +- if os.path.isfile(settings.crm_mon_schema): +- etree.RelaxNG(file=settings.crm_mon_schema).assertValid(dom) ++ if( ++ os.path.isfile(settings.crm_mon_schema) ++ and ++ not etree.RelaxNG(file=settings.crm_mon_schema).validate(dom) ++ ): ++ print( ++ "Warning: xml with cluster status does not conform to the" ++ " crm_mon schema" ++ ) + return dom +- except (etree.XMLSyntaxError, etree.DocumentInvalid): ++ except etree.XMLSyntaxError: + raise LibraryError(reports.cluster_state_invalid_format()) + + class ClusterState(_Element): +diff --git a/pcs/test/test_lib_pacemaker_state.py b/pcs/test/test_lib_pacemaker_state.py +index 13f6eb0..83b30a3 100644 +--- a/pcs/test/test_lib_pacemaker_state.py ++++ b/pcs/test/test_lib_pacemaker_state.py +@@ -5,6 +5,14 @@ from __future__ import ( + unicode_literals, + ) + ++import sys ++try: ++ from cStringIO import StringIO ++except ImportError: ++ #python 3 ++ from io import StringIO ++ ++ + from pcs.test.tools.pcs_unittest import TestCase + from lxml import etree + +@@ -84,16 +92,24 @@ class ClusterStatusTest(TestBase): + ) + + def test_refuse_invalid_document(self): ++ #commands writes to stdout ++ #we want clean test output, so we capture it ++ tmp_stdout = sys.stdout ++ stdout_catpture = StringIO() ++ sys.stdout = stdout_catpture + self.covered_status.append_to_first_tag_name( + 'nodes', + '<node without="required attributes" />' + ) +- +- assert_raise_library_error( +- lambda: ClusterState(str(self.covered_status)), +- (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {}) ++ ClusterState(str(self.covered_status)) ++ self.assertEqual( ++ stdout_catpture.getvalue(), ++ "Warning: xml with cluster status does not conform to the crm_mon" ++ " schema\n" + ) + ++ sys.stdout = tmp_stdout ++ + + class 
WorkWithClusterStatusNodesTest(TestBase): + def fixture_node_string(self, **kwargs): +-- +1.8.3.1 + diff --git a/SOURCES/test-corrections.patch b/SOURCES/test-corrections.patch new file mode 100644 index 0000000..ff3d123 --- /dev/null +++ b/SOURCES/test-corrections.patch @@ -0,0 +1,1312 @@ +From 60a297aa6a1e2d31619da281d843235edcaa43bb Mon Sep 17 00:00:00 2001 +From: Ivan Devat <idevat@redhat.com> +Date: Wed, 14 Sep 2016 09:04:57 +0200 +Subject: [PATCH] squash test corrections + +test: import correct unittest version + +test: merge modules importing unittest with version care + +test: factor out recurring fixture in tests +--- + pcs/cli/booth/test/test_command.py | 4 ++-- + pcs/cli/booth/test/test_env.py | 4 ++-- + pcs/cli/common/test/test_completion.py | 2 +- + pcs/cli/common/test/test_console_report.py | 2 +- + pcs/cli/common/test/test_lib_wrapper.py | 4 ++-- + pcs/cli/common/test/test_middleware.py | 2 +- + pcs/cli/common/test/test_parse_args.py | 2 +- + pcs/cli/constraint/test/test_command.py | 4 ++-- + pcs/cli/constraint/test/test_console_report.py | 2 +- + pcs/cli/constraint/test/test_parse_args.py | 9 ++------- + pcs/cli/constraint_all/test/test_console_report.py | 4 ++-- + pcs/cli/constraint_ticket/test/test_command.py | 4 ++-- + pcs/cli/constraint_ticket/test/test_console_report.py | 2 +- + pcs/cli/constraint_ticket/test/test_parse_args.py | 2 +- + pcs/lib/booth/test/test_config_exchange.py | 2 +- + pcs/lib/booth/test/test_config_files.py | 10 ++++------ + pcs/lib/booth/test/test_config_structure.py | 4 ++-- + pcs/lib/booth/test/test_env.py | 11 ++++------- + pcs/lib/booth/test/test_resource.py | 4 ++-- + pcs/lib/booth/test/test_status.py | 4 ++-- + pcs/lib/booth/test/test_sync.py | 4 ++-- + pcs/lib/cib/test/test_alert.py | 4 ++-- + pcs/lib/cib/test/test_constraint.py | 4 ++-- + pcs/lib/cib/test/test_constraint_colocation.py | 4 ++-- + pcs/lib/cib/test/test_constraint_order.py | 4 ++-- + pcs/lib/cib/test/test_constraint_ticket.py | 4 ++-- + 
pcs/lib/cib/test/test_nvpair.py | 2 +- + pcs/lib/cib/test/test_resource.py | 2 +- + pcs/lib/cib/test/test_resource_set.py | 4 ++-- + pcs/lib/commands/test/test_alert.py | 4 ++-- + pcs/lib/commands/test/test_booth.py | 10 ++++------ + pcs/lib/commands/test/test_constraint_common.py | 4 ++-- + pcs/lib/commands/test/test_ticket.py | 2 +- + pcs/lib/test/misc.py | 2 +- + pcs/lib/test/test_env_file.py | 4 ++-- + pcs/lib/test/test_errors.py | 2 +- + pcs/lib/test/test_pacemaker_values.py | 2 +- + pcs/test/test_acl.py | 2 +- + pcs/test/test_cluster.py | 2 +- + pcs/test/test_common_tools.py | 2 +- + pcs/test/test_constraints.py | 2 +- + pcs/test/test_lib_cib_acl.py | 2 +- + pcs/test/test_lib_cib_tools.py | 4 ++-- + pcs/test/test_lib_commands_qdevice.py | 4 ++-- + pcs/test/test_lib_commands_quorum.py | 4 ++-- + pcs/test/test_lib_commands_sbd.py | 4 ++-- + pcs/test/test_lib_corosync_config_facade.py | 2 +- + pcs/test/test_lib_corosync_config_parser.py | 2 +- + pcs/test/test_lib_corosync_live.py | 4 ++-- + pcs/test/test_lib_corosync_qdevice_client.py | 4 ++-- + pcs/test/test_lib_corosync_qdevice_net.py | 4 ++-- + pcs/test/test_lib_env.py | 4 ++-- + pcs/test/test_lib_external.py | 4 ++-- + pcs/test/test_lib_node.py | 2 +- + pcs/test/test_lib_nodes_task.py | 4 ++-- + pcs/test/test_lib_pacemaker.py | 4 ++-- + pcs/test/test_lib_pacemaker_state.py | 2 +- + pcs/test/test_lib_resource_agent.py | 4 ++-- + pcs/test/test_lib_sbd.py | 4 ++-- + pcs/test/test_lib_tools.py | 2 +- + pcs/test/test_node.py | 2 +- + pcs/test/test_properties.py | 2 +- + pcs/test/test_quorum.py | 2 +- + pcs/test/test_resource.py | 2 +- + pcs/test/test_rule.py | 2 +- + pcs/test/test_stonith.py | 2 +- + pcs/test/test_utils.py | 2 +- + pcs/test/tools/misc.py | 14 ++++++++++++++ + pcs/test/tools/pcs_mock.py | 13 ------------- + pcs/test/tools/pcs_unittest.py | 19 +++++++++++++++++++ + 70 files changed, 142 insertions(+), 134 deletions(-) + delete mode 100644 pcs/test/tools/pcs_mock.py + +diff --git 
a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py +index 019a74f..44d7a12 100644 +--- a/pcs/cli/booth/test/test_command.py ++++ b/pcs/cli/booth/test/test_command.py +@@ -5,10 +5,10 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.cli.booth import command +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + class ConfigSetupTest(TestCase): +diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py +index 1ead6f2..b1d80aa 100644 +--- a/pcs/cli/booth/test/test_env.py ++++ b/pcs/cli/booth/test/test_env.py +@@ -5,12 +5,12 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.cli.booth.env import middleware_config + from pcs.common import report_codes, env_file_role_codes + from pcs.lib.errors import LibraryEnvError, ReportItem +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + class BoothConfTest(TestCase): +diff --git a/pcs/cli/common/test/test_completion.py b/pcs/cli/common/test/test_completion.py +index 865da2c..daec1bc 100644 +--- a/pcs/cli/common/test/test_completion.py ++++ b/pcs/cli/common/test/test_completion.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.cli.common.completion import ( + _find_suggestions, +diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py +index 23cf8e9..63fe55c 100644 +--- a/pcs/cli/common/test/test_console_report.py ++++ b/pcs/cli/common/test/test_console_report.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from pcs.cli.common.console_report 
import indent + + class IndentTest(TestCase): +diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py +index c10bb62..149e612 100644 +--- a/pcs/cli/common/test/test_lib_wrapper.py ++++ b/pcs/cli/common/test/test_lib_wrapper.py +@@ -4,10 +4,10 @@ from __future__ import ( + print_function, + unicode_literals, + ) +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.cli.common.lib_wrapper import Library, bind +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.lib.errors import ReportItem + from pcs.lib.errors import LibraryEnvError + +diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py +index c030cd9..7eefbca 100644 +--- a/pcs/cli/common/test/test_middleware.py ++++ b/pcs/cli/common/test/test_middleware.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.cli.common import middleware + +diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py +index eb358a5..23704b9 100644 +--- a/pcs/cli/common/test/test_parse_args.py ++++ b/pcs/cli/common/test/test_parse_args.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from pcs.cli.common.parse_args import( + split_list, + prepare_options, +diff --git a/pcs/cli/constraint/test/test_command.py b/pcs/cli/constraint/test/test_command.py +index 5b493cd..6a79e00 100644 +--- a/pcs/cli/constraint/test/test_command.py ++++ b/pcs/cli/constraint/test/test_command.py +@@ -5,10 +5,10 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from pcs.cli.constraint import command + +-from pcs.test.tools.pcs_mock import mock ++from 
pcs.test.tools.pcs_unittest import mock + + def fixture_constraint(): + return { +diff --git a/pcs/cli/constraint/test/test_console_report.py b/pcs/cli/constraint/test/test_console_report.py +index b20bc80..084124c 100644 +--- a/pcs/cli/constraint/test/test_console_report.py ++++ b/pcs/cli/constraint/test/test_console_report.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from pcs.cli.constraint import console_report + + class OptionsTest(TestCase): +diff --git a/pcs/cli/constraint/test/test_parse_args.py b/pcs/cli/constraint/test/test_parse_args.py +index 7673023..484cb8d 100644 +--- a/pcs/cli/constraint/test/test_parse_args.py ++++ b/pcs/cli/constraint/test/test_parse_args.py +@@ -5,16 +5,11 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.cli.common.errors import CmdLineInputError + from pcs.cli.constraint.parse_args import prepare_set_args, prepare_resource_sets +- +- +-try: +- import unittest.mock as mock +-except ImportError: +- import mock ++from pcs.test.tools.pcs_unittest import mock + + + @mock.patch("pcs.cli.common.parse_args.prepare_options") +diff --git a/pcs/cli/constraint_all/test/test_console_report.py b/pcs/cli/constraint_all/test/test_console_report.py +index 1cf5721..61be2cc 100644 +--- a/pcs/cli/constraint_all/test/test_console_report.py ++++ b/pcs/cli/constraint_all/test/test_console_report.py +@@ -5,8 +5,8 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import TestCase ++from pcs.test.tools.pcs_unittest import mock + from pcs.cli.constraint_all import console_report + + class ConstraintTest(TestCase): +diff --git a/pcs/cli/constraint_ticket/test/test_command.py b/pcs/cli/constraint_ticket/test/test_command.py +index 
045d336..d40d421 100644 +--- a/pcs/cli/constraint_ticket/test/test_command.py ++++ b/pcs/cli/constraint_ticket/test/test_command.py +@@ -5,9 +5,9 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.cli.common.errors import CmdLineInputError + from pcs.cli.constraint_ticket import command + +diff --git a/pcs/cli/constraint_ticket/test/test_console_report.py b/pcs/cli/constraint_ticket/test/test_console_report.py +index b352287..11af2e2 100644 +--- a/pcs/cli/constraint_ticket/test/test_console_report.py ++++ b/pcs/cli/constraint_ticket/test/test_console_report.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from pcs.cli.constraint_ticket import console_report + + class ConstraintPlainTest(TestCase): +diff --git a/pcs/cli/constraint_ticket/test/test_parse_args.py b/pcs/cli/constraint_ticket/test/test_parse_args.py +index 9d23167..4a592c2 100644 +--- a/pcs/cli/constraint_ticket/test/test_parse_args.py ++++ b/pcs/cli/constraint_ticket/test/test_parse_args.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from pcs.cli.constraint_ticket import parse_args + from pcs.cli.common.errors import CmdLineInputError + +diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py +index eb1885c..9717a96 100644 +--- a/pcs/lib/booth/test/test_config_exchange.py ++++ b/pcs/lib/booth/test/test_config_exchange.py +@@ -4,7 +4,7 @@ from __future__ import ( + print_function, + unicode_literals, + ) +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from pcs.lib.booth import config_structure, config_exchange + + +diff --git 
a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py +index 8266cac..d0df256 100644 +--- a/pcs/lib/booth/test/test_config_files.py ++++ b/pcs/lib/booth/test/test_config_files.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import os.path +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.common import report_codes, env_file_role_codes as file_roles + from pcs.lib.booth import config_files +@@ -14,12 +14,10 @@ from pcs.lib.errors import ReportItemSeverity as severities + from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR + from pcs.test.tools.assertions import assert_raise_library_error, assert_report_item_list_equal + from pcs.test.tools.custom_mock import MockLibraryReportProcessor +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.misc import create_patcher ++from pcs.test.tools.pcs_unittest import mock + +-def patch_config_files(target, *args, **kwargs): +- return mock.patch( +- "pcs.lib.booth.config_files.{0}".format(target), *args, **kwargs +- ) ++patch_config_files = create_patcher("pcs.lib.booth.config_files") + + @mock.patch("os.path.isdir") + @mock.patch("os.listdir") +diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py +index 1dd07cb..5e7ac68 100644 +--- a/pcs/lib/booth/test/test_config_structure.py ++++ b/pcs/lib/booth/test/test_config_structure.py +@@ -5,13 +5,13 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.common import report_codes + from pcs.lib.booth import config_structure + from pcs.lib.errors import ReportItemSeverity as severities + from pcs.test.tools.assertions import assert_raise_library_error +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + class ValidateTicketExistsTest(TestCase): +diff --git a/pcs/lib/booth/test/test_env.py 
b/pcs/lib/booth/test/test_env.py +index 77e0944..993d709 100644 +--- a/pcs/lib/booth/test/test_env.py ++++ b/pcs/lib/booth/test/test_env.py +@@ -8,20 +8,17 @@ from __future__ import ( + import grp + import os + import pwd +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs import settings + from pcs.common import report_codes + from pcs.lib.booth import env + from pcs.lib.errors import ReportItemSeverity as severities + from pcs.test.tools.assertions import assert_raise_library_error +-from pcs.test.tools.misc import get_test_resource as rc +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.misc import get_test_resource as rc, create_patcher ++from pcs.test.tools.pcs_unittest import mock + +-def patch_env(target, *args, **kwargs): +- return mock.patch( +- "pcs.lib.booth.env.{0}".format(target), *args, **kwargs +- ) ++patch_env = create_patcher("pcs.lib.booth.env") + + class GetConfigFileNameTest(TestCase): + @patch_env("os.path.exists") +diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py +index dd72c1e..8971438 100644 +--- a/pcs/lib/booth/test/test_resource.py ++++ b/pcs/lib/booth/test/test_resource.py +@@ -5,12 +5,12 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from lxml import etree + + import pcs.lib.booth.resource as booth_resource +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.misc import get_test_resource as rc + + +diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py +index 0ea837a..d47ffca 100644 +--- a/pcs/lib/booth/test/test_status.py ++++ b/pcs/lib/booth/test/test_status.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + try: + # python 2 +@@ -15,7 +15,7 @@ except 
ImportError: + # python 3 + from urllib.parse import parse_qs as url_decode + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import assert_raise_library_error + + from pcs import settings +diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py +index 9ba6e80..701b086 100644 +--- a/pcs/lib/booth/test/test_sync.py ++++ b/pcs/lib/booth/test/test_sync.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + import json + import base64 +@@ -16,7 +16,7 @@ except ImportError: + # python 3 + from urllib.parse import parse_qs as url_decode + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import ( + assert_report_item_list_equal, + assert_raise_library_error, +diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py +index 50eaef6..c47dd1e 100644 +--- a/pcs/lib/cib/test/test_alert.py ++++ b/pcs/lib/cib/test/test_alert.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from lxml import etree + +@@ -17,7 +17,7 @@ from pcs.test.tools.assertions import( + assert_xml_equal, + assert_report_item_list_equal, + ) +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.custom_mock import MockLibraryReportProcessor + + +diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py +index 961f8b0..a4ee636 100644 +--- a/pcs/lib/cib/test/test_constraint.py ++++ b/pcs/lib/cib/test/test_constraint.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + from functools import partial +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from lxml import etree + +@@ -18,7 +18,7 @@ from 
pcs.test.tools.assertions import( + assert_xml_equal, + ) + from pcs.test.tools.custom_mock import MockLibraryReportProcessor +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import ( + assert_report_item_list_equal, + ) +diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py +index 377b981..6a85d8a 100644 +--- a/pcs/lib/cib/test/test_constraint_colocation.py ++++ b/pcs/lib/cib/test/test_constraint_colocation.py +@@ -5,13 +5,13 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.common import report_codes + from pcs.lib.cib.constraint import colocation + from pcs.lib.errors import ReportItemSeverity as severities + from pcs.test.tools.assertions import assert_raise_library_error +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + #Patch check_new_id_applicable is always desired when working with +diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py +index 02d1c5f..3cb33d1 100644 +--- a/pcs/lib/cib/test/test_constraint_order.py ++++ b/pcs/lib/cib/test/test_constraint_order.py +@@ -5,13 +5,13 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.common import report_codes + from pcs.lib.cib.constraint import order + from pcs.lib.errors import ReportItemSeverity as severities + from pcs.test.tools.assertions import assert_raise_library_error +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + #Patch check_new_id_applicable is always desired when working with +diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py +index 87fd1e5..ede748e 100644 +--- 
a/pcs/lib/cib/test/test_constraint_ticket.py ++++ b/pcs/lib/cib/test/test_constraint_ticket.py +@@ -6,13 +6,13 @@ from __future__ import ( + ) + + from functools import partial +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.common import report_codes + from pcs.lib.cib.constraint import ticket + from pcs.lib.errors import ReportItemSeverity as severities + from pcs.test.tools.assertions import assert_raise_library_error +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + @mock.patch("pcs.lib.cib.constraint.ticket.tools.check_new_id_applicable") +diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py +index 6907f25..56ba4d1 100644 +--- a/pcs/lib/cib/test/test_nvpair.py ++++ b/pcs/lib/cib/test/test_nvpair.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from lxml import etree + +diff --git a/pcs/lib/cib/test/test_resource.py b/pcs/lib/cib/test/test_resource.py +index ef33ef6..c1e21a0 100644 +--- a/pcs/lib/cib/test/test_resource.py ++++ b/pcs/lib/cib/test/test_resource.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from lxml import etree + from pcs.lib.cib.resource import find_by_id + +diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py +index 7b77ac4..e4fd8e4 100644 +--- a/pcs/lib/cib/test/test_resource_set.py ++++ b/pcs/lib/cib/test/test_resource_set.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from lxml import etree + +@@ -16,7 +16,7 @@ from pcs.test.tools.assertions import( + assert_raise_library_error, + assert_xml_equal + ) +-from pcs.test.tools.pcs_mock import mock ++from 
pcs.test.tools.pcs_unittest import mock + + + class PrepareSetTest(TestCase): +diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py +index bced45e..bc68baf 100644 +--- a/pcs/lib/commands/test/test_alert.py ++++ b/pcs/lib/commands/test/test_alert.py +@@ -8,9 +8,9 @@ from __future__ import ( + import logging + from lxml import etree + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import ( + assert_raise_library_error, + assert_xml_equal, +diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py +index d2429b6..08d2c79 100644 +--- a/pcs/lib/commands/test/test_booth.py ++++ b/pcs/lib/commands/test/test_booth.py +@@ -8,14 +8,15 @@ from __future__ import ( + import os + import base64 + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.custom_mock import MockLibraryReportProcessor + from pcs.test.tools.assertions import ( + assert_raise_library_error, + assert_report_item_list_equal, + ) ++from pcs.test.tools.misc import create_patcher + + from pcs import settings + from pcs.common import report_codes +@@ -32,10 +33,7 @@ from pcs.lib.external import ( + StopServiceError + ) + +-def patch_commands(target, *args, **kwargs): +- return mock.patch( +- "pcs.lib.commands.booth.{0}".format(target), *args, **kwargs +- ) ++patch_commands = create_patcher("pcs.lib.commands.booth") + + @mock.patch("pcs.lib.booth.config_files.generate_key", return_value="key value") + @mock.patch("pcs.lib.commands.booth.build", return_value="config content") +diff --git a/pcs/lib/commands/test/test_constraint_common.py b/pcs/lib/commands/test/test_constraint_common.py +index e0872ff..cb5e177 100644 +--- 
a/pcs/lib/commands/test/test_constraint_common.py ++++ b/pcs/lib/commands/test/test_constraint_common.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from lxml import etree + +@@ -17,7 +17,7 @@ from pcs.test.tools.assertions import( + assert_xml_equal, + ) + from pcs.test.tools.custom_mock import MockLibraryReportProcessor +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + def fixture_cib_and_constraints(): +diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py +index d8b8a5f..586ca4b 100644 +--- a/pcs/lib/commands/test/test_ticket.py ++++ b/pcs/lib/commands/test/test_ticket.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.common import report_codes + from pcs.lib.commands.constraint import ticket as ticket_command +diff --git a/pcs/lib/test/misc.py b/pcs/lib/test/misc.py +index 1b1670a..be99bb2 100644 +--- a/pcs/lib/test/misc.py ++++ b/pcs/lib/test/misc.py +@@ -9,7 +9,7 @@ import logging + + from pcs.lib.env import LibraryEnvironment as Env + from pcs.test.tools.custom_mock import MockLibraryReportProcessor +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + def get_mocked_env(**kwargs): +diff --git a/pcs/lib/test/test_env_file.py b/pcs/lib/test/test_env_file.py +index 3e27af1..754b40e 100644 +--- a/pcs/lib/test/test_env_file.py ++++ b/pcs/lib/test/test_env_file.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.common import report_codes + from pcs.lib.env_file import RealFile, GhostFile +@@ -15,7 +15,7 @@ from pcs.test.tools.assertions import( + assert_report_item_list_equal + ) + from 
pcs.test.tools.custom_mock import MockLibraryReportProcessor +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + + class GhostFileReadTest(TestCase): +diff --git a/pcs/lib/test/test_errors.py b/pcs/lib/test/test_errors.py +index 2e99e19..871aa76 100644 +--- a/pcs/lib/test/test_errors.py ++++ b/pcs/lib/test/test_errors.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.lib.errors import LibraryEnvError + +diff --git a/pcs/lib/test/test_pacemaker_values.py b/pcs/lib/test/test_pacemaker_values.py +index 7979990..62b8e91 100644 +--- a/pcs/lib/test/test_pacemaker_values.py ++++ b/pcs/lib/test/test_pacemaker_values.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.test.tools.assertions import assert_raise_library_error + +diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py +index b053614..186c035 100644 +--- a/pcs/test/test_acl.py ++++ b/pcs/test/test_acl.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import shutil +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( +diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py +index 8a245a2..36f3687 100644 +--- a/pcs/test/test_cluster.py ++++ b/pcs/test/test_cluster.py +@@ -7,7 +7,7 @@ from __future__ import ( + + import os + import shutil +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( +diff --git a/pcs/test/test_common_tools.py b/pcs/test/test_common_tools.py +index 5c8482e..5290e6d 100644 +--- a/pcs/test/test_common_tools.py ++++ b/pcs/test/test_common_tools.py +@@ -5,7 +5,7 @@ from __future__ import ( + 
unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + import time + + from pcs.common import tools +diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py +index 364b40d..7c76e09 100644 +--- a/pcs/test/test_constraints.py ++++ b/pcs/test/test_constraints.py +@@ -7,7 +7,7 @@ from __future__ import ( + + import os + import shutil +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.assertions import AssertPcsMixin, console_report + from pcs.test.tools.misc import ( +diff --git a/pcs/test/test_lib_cib_acl.py b/pcs/test/test_lib_cib_acl.py +index 7e1750e..efaad7e 100644 +--- a/pcs/test/test_lib_cib_acl.py ++++ b/pcs/test/test_lib_cib_acl.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.test.tools.assertions import ( + assert_raise_library_error, +diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py +index 0fd4d22..ffc2642 100644 +--- a/pcs/test/test_lib_cib_tools.py ++++ b/pcs/test/test_lib_cib_tools.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from os.path import join + from lxml import etree +@@ -15,7 +15,7 @@ from pcs.test.tools.assertions import ( + assert_xml_equal, + ) + from pcs.test.tools.misc import get_test_resource as rc +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.xml import get_xml_manipulation_creator_from_file + + from pcs import settings +diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py +index ff588d5..10841e9 100644 +--- a/pcs/test/test_lib_commands_qdevice.py ++++ b/pcs/test/test_lib_commands_qdevice.py +@@ -5,11 +5,11 @@ from __future__ import ( + unicode_literals, + ) + +-from 
unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + import base64 + import logging + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import ( + assert_raise_library_error, + assert_report_item_list_equal, +diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py +index d286a8f..d7701af 100644 +--- a/pcs/test/test_lib_commands_quorum.py ++++ b/pcs/test/test_lib_commands_quorum.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import logging +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.test.tools.assertions import ( + assert_raise_library_error, +@@ -17,7 +17,7 @@ from pcs.test.tools.misc import ( + ac, + get_test_resource as rc, + ) +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + from pcs.common import report_codes + from pcs.lib.env import LibraryEnvironment +diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py +index 0663082..f8146ce 100644 +--- a/pcs/test/test_lib_commands_sbd.py ++++ b/pcs/test/test_lib_commands_sbd.py +@@ -7,9 +7,9 @@ from __future__ import ( + + import logging + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import ( + assert_raise_library_error, + assert_report_item_list_equal, +diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py +index 91f7b40..4373d65 100644 +--- a/pcs/test/test_lib_corosync_config_facade.py ++++ b/pcs/test/test_lib_corosync_config_facade.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + import re + + from pcs.test.tools.assertions import ( +diff --git 
a/pcs/test/test_lib_corosync_config_parser.py b/pcs/test/test_lib_corosync_config_parser.py +index da20889..a68710b 100644 +--- a/pcs/test/test_lib_corosync_config_parser.py ++++ b/pcs/test/test_lib_corosync_config_parser.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.misc import ac + +diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py +index 0fc5eb2..3173195 100644 +--- a/pcs/test/test_lib_corosync_live.py ++++ b/pcs/test/test_lib_corosync_live.py +@@ -5,13 +5,13 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + import os.path + + from pcs.test.tools.assertions import assert_raise_library_error + from pcs.test.tools.misc import get_test_resource as rc +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + from pcs import settings + from pcs.common import report_codes +diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py +index e0332f1..0b5bd67 100644 +--- a/pcs/test/test_lib_corosync_qdevice_client.py ++++ b/pcs/test/test_lib_corosync_qdevice_client.py +@@ -5,9 +5,9 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import assert_raise_library_error + + from pcs.common import report_codes +diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py +index 3d473f7..340a8dc 100644 +--- a/pcs/test/test_lib_corosync_qdevice_net.py ++++ b/pcs/test/test_lib_corosync_qdevice_net.py +@@ -5,12 +5,12 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from 
pcs.test.tools.pcs_unittest import TestCase + + import base64 + import os.path + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import assert_raise_library_error + from pcs.test.tools.misc import get_test_resource + +diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py +index c6322b7..205fd60 100644 +--- a/pcs/test/test_lib_env.py ++++ b/pcs/test/test_lib_env.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + import logging + from lxml import etree + +@@ -16,7 +16,7 @@ from pcs.test.tools.assertions import ( + ) + from pcs.test.tools.custom_mock import MockLibraryReportProcessor + from pcs.test.tools.misc import get_test_resource as rc +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + from pcs.lib.env import LibraryEnvironment + from pcs.common import report_codes +diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py +index b0ffdbb..aafbe85 100644 +--- a/pcs/test/test_lib_external.py ++++ b/pcs/test/test_lib_external.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + import os.path + import logging + try: +@@ -27,7 +27,7 @@ from pcs.test.tools.assertions import ( + assert_report_item_list_equal, + ) + from pcs.test.tools.custom_mock import MockLibraryReportProcessor +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + from pcs import settings + from pcs.common import report_codes +diff --git a/pcs/test/test_lib_node.py b/pcs/test/test_lib_node.py +index 6c841d3..caf128f 100644 +--- a/pcs/test/test_lib_node.py ++++ b/pcs/test/test_lib_node.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from 
pcs.test.tools.pcs_unittest import TestCase + + import pcs.lib.node as lib + +diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py +index cff88eb..6f05b15 100644 +--- a/pcs/test/test_lib_nodes_task.py ++++ b/pcs/test/test_lib_nodes_task.py +@@ -5,14 +5,14 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.test.tools.assertions import ( + assert_raise_library_error, + assert_report_item_list_equal, + ) + from pcs.test.tools.custom_mock import MockLibraryReportProcessor +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + + from pcs.common import report_codes + from pcs.lib.external import NodeCommunicator, NodeAuthenticationException +diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py +index 0edee5c..c475db6 100644 +--- a/pcs/test/test_lib_pacemaker.py ++++ b/pcs/test/test_lib_pacemaker.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + import os.path + + from pcs.test.tools.assertions import ( +@@ -13,7 +13,7 @@ from pcs.test.tools.assertions import ( + assert_xml_equal, + ) + from pcs.test.tools.misc import get_test_resource as rc +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.xml import XmlManipulation + + from pcs import settings +diff --git a/pcs/test/test_lib_pacemaker_state.py b/pcs/test/test_lib_pacemaker_state.py +index 54f536d..13f6eb0 100644 +--- a/pcs/test/test_lib_pacemaker_state.py ++++ b/pcs/test/test_lib_pacemaker_state.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + from lxml import etree + + from pcs.test.tools.assertions import assert_raise_library_error +diff --git 
a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py +index 5704184..08f9061 100644 +--- a/pcs/test/test_lib_resource_agent.py ++++ b/pcs/test/test_lib_resource_agent.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + import os.path + + from lxml import etree +@@ -14,7 +14,7 @@ from pcs.test.tools.assertions import ( + ExtendedAssertionsMixin, + assert_xml_equal, + ) +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.xml import XmlManipulation as XmlMan + + +diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py +index 516e0bd..720d8b1 100644 +--- a/pcs/test/test_lib_sbd.py ++++ b/pcs/test/test_lib_sbd.py +@@ -6,9 +6,9 @@ from __future__ import ( + ) + + import json +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + +-from pcs.test.tools.pcs_mock import mock ++from pcs.test.tools.pcs_unittest import mock + from pcs.test.tools.assertions import ( + assert_raise_library_error, + assert_report_item_list_equal, +diff --git a/pcs/test/test_lib_tools.py b/pcs/test/test_lib_tools.py +index 5141ca9..606cb05 100644 +--- a/pcs/test/test_lib_tools.py ++++ b/pcs/test/test_lib_tools.py +@@ -5,7 +5,7 @@ from __future__ import ( + unicode_literals, + ) + +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.lib import tools + +diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py +index 785c711..9b45e07 100644 +--- a/pcs/test/test_node.py ++++ b/pcs/test/test_node.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import shutil +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( +diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py +index fbaf880..9634cca 100644 +--- 
a/pcs/test/test_properties.py ++++ b/pcs/test/test_properties.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import shutil +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( +diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py +index 86de4c6..4f15d7f 100644 +--- a/pcs/test/test_quorum.py ++++ b/pcs/test/test_quorum.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import shutil +-from unittest import TestCase ++from pcs.test.tools.pcs_unittest import TestCase + + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( +diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py +index 614b895..87a7fa8 100644 +--- a/pcs/test/test_resource.py ++++ b/pcs/test/test_resource.py +@@ -8,7 +8,7 @@ from __future__ import ( + import os + import shutil + import re +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( +diff --git a/pcs/test/test_rule.py b/pcs/test/test_rule.py +index 8cf717a..ad3448d 100644 +--- a/pcs/test/test_rule.py ++++ b/pcs/test/test_rule.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import shutil +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + import xml.dom.minidom + + from pcs import rule +diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py +index a6ee2f5..82b2c84 100644 +--- a/pcs/test/test_stonith.py ++++ b/pcs/test/test_stonith.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import shutil +-import unittest ++from pcs.test.tools import pcs_unittest as unittest + + from pcs.test.tools.misc import ( + ac, +diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py +index 43145fd..252de30 100644 +--- a/pcs/test/test_utils.py ++++ b/pcs/test/test_utils.py +@@ -6,7 +6,7 @@ from __future__ import ( + ) + + import sys 
+-import unittest ++from pcs.test.tools import pcs_unittest as unittest + import xml.dom.minidom + import xml.etree.cElementTree as ET + from time import sleep +diff --git a/pcs/test/tools/misc.py b/pcs/test/tools/misc.py +index a78ccdc..745b228 100644 +--- a/pcs/test/tools/misc.py ++++ b/pcs/test/tools/misc.py +@@ -10,6 +10,7 @@ import os.path + import re + + from pcs import utils ++from pcs.test.tools.pcs_unittest import mock + + + testdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +@@ -50,3 +51,16 @@ def is_minimum_pacemaker_version(cmajor, cminor, crev): + or + (major == cmajor and minor == cminor and rev >= crev) + ) ++ ++def create_patcher(target_prefix): ++ """ ++ Return function for patching tests with preconfigured target prefix ++ string target_prefix is prefix for patched names. Typicaly tested module ++ like for example "pcs.lib.commands.booth". Between target_prefix and target ++ is "." (dot) ++ """ ++ def patch(target, *args, **kwargs): ++ return mock.patch( ++ "{0}.{1}".format(target_prefix, target), *args, **kwargs ++ ) ++ return patch +diff --git a/pcs/test/tools/pcs_mock.py b/pcs/test/tools/pcs_mock.py +deleted file mode 100644 +index d84ac67..0000000 +--- a/pcs/test/tools/pcs_mock.py ++++ /dev/null +@@ -1,13 +0,0 @@ +-try: +- import unittest.mock as mock +-except ImportError: +- import mock +- +-if not hasattr(mock.Mock, "assert_not_called"): +- def __assert_not_called(self, *args, **kwargs): +- if self.call_count != 0: +- msg = ("Expected '%s' to not have been called. Called %s times." % +- (self._mock_name or 'mock', self.call_count)) +- raise AssertionError(msg) +- mock.Mock.assert_not_called = __assert_not_called +- +diff --git a/pcs/test/tools/pcs_unittest.py b/pcs/test/tools/pcs_unittest.py +index 4a3205d..af549ae 100644 +--- a/pcs/test/tools/pcs_unittest.py ++++ b/pcs/test/tools/pcs_unittest.py +@@ -1,7 +1,26 @@ + import sys ++#In package unittest there is no module mock before python 3.3. 
In python 3 ++#module mock is not imported by * because module mock is not imported in ++#unittest/__init__.py + major, minor = sys.version_info[:2] + if major == 2 and minor == 6: ++ #we use features that are missing before 2.7 (like test skipping, ++ #assertRaises as context manager...) so we need unittest2 + from unittest2 import * ++ import mock + else: + from unittest import * ++ try: ++ import unittest.mock as mock ++ except ImportError: ++ import mock + del major, minor, sys ++ ++#backport of assert_not_called (new in version 3.5) ++if not hasattr(mock.Mock, "assert_not_called"): ++ def __assert_not_called(self, *args, **kwargs): ++ if self.call_count != 0: ++ msg = ("Expected '%s' to not have been called. Called %s times." % ++ (self._mock_name or 'mock', self.call_count)) ++ raise AssertionError(msg) ++ mock.Mock.assert_not_called = __assert_not_called +-- +1.8.3.1 + diff --git a/SOURCES/test-fix-an-occasional-multithread-test-fail.patch b/SOURCES/test-fix-an-occasional-multithread-test-fail.patch new file mode 100644 index 0000000..8a3f660 --- /dev/null +++ b/SOURCES/test-fix-an-occasional-multithread-test-fail.patch @@ -0,0 +1,28 @@ +From c4e916c7ae9f5bb040d8268f93d5949e1cd078f8 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek <tojeline@redhat.com> +Date: Tue, 26 Jul 2016 10:17:34 +0200 +Subject: [PATCH] test: fix an occasional multithread test fail + +--- + pcs/test/test_utils.py | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py +index 192048e..43145fd 100644 +--- a/pcs/test/test_utils.py ++++ b/pcs/test/test_utils.py +@@ -1806,7 +1806,10 @@ class RunParallelTest(unittest.TestCase): + wait_seconds=.1 + ) + +- self.assertEqual(log, ['first', 'second']) ++ self.assertEqual( ++ sorted(log), ++ sorted(['first', 'second']) ++ ) + + def test_wait_for_slower_workers(self): + log = [] +-- +1.8.3.1 + diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec index 738f4b5..98bca23 100644 --- 
a/SPECS/pcs.spec +++ b/SPECS/pcs.spec @@ -1,93 +1,310 @@ Name: pcs -Version: 0.9.143 -Release: 15%{?dist} +Version: 0.9.152 +Release: 10%{?dist} License: GPLv2 -URL: http://github.com/feist/pcs +URL: https://github.com/ClusterLabs/pcs Group: System Environment/Base -#BuildArch: x86_64 -BuildRequires: python2-devel Summary: Pacemaker Configuration System -Source0: https://tojeline.fedorapeople.org/pkgs/pcs/pcs-withgems-%{version}.tar.gz +#building only for architectures with pacemaker and corosync available +ExclusiveArch: i686 x86_64 s390x +#part after last slash is recognized as filename in look-aside repository +#desired name is achived by trick with hash anchor +Source0: %{url}/archive/%{version}.tar.gz#/%{name}-%{version}.tar.gz Source1: HAM-logo.png -Source2: favicon.ico -Patch0: bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch -Patch1: bz1158577-01-improve-logging-in-pcsd.patch -Patch2: bz1189857-01-fix-Add-Resource-form-in-web-UI.patch -Patch3: bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch -Patch4: bz1122818-02-fix-resource-relocate-for-remote-nodes.patch -Patch5: bz1253491-01-fix-pcs-pcsd-path-detection.patch -Patch6: bz1253294-01-fixed-command-injection-vulnerability.patch -Patch7: bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch -Patch8: bz1158577-02-fix-certificates-syncing.patch -Patch9: bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch -Patch10: bz1158566-01-fix-dashboard-in-web-UI.patch -Patch11: bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch -Patch12: bz1189857-04-fix-constraints-removing-in-web-UI.patch -Patch13: bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch -Patch14: bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch -Patch15: bz1235022-02-fix-crash-when-missing-nagios-metadata.patch -Patch16: bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch -Patch17: bz1257369-01-always-print-output-of-crm_resource-cleanup.patch 
-Patch18: bz1158566-02-fix-loading-cluster-status-for-web-UI.patch -Patch19: bz1158569-01-fixed-a-typo-in-an-error-message.patch -Patch20: bz1158571-03-fix-checking-user-s-group-membership.patch -Patch21: bz1188361-01-Make-port-parameter-of-fence-agents-optional.patch -Patch22: bz1158569-02-fix-authentication-in-web-UI.patch -Patch23: bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch -Patch24: bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch -Patch25: bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch -Patch26: bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch -Patch27: bz1189857-07-web-UI-fixes.patch -Patch28: bz1265425-01-Fix-for-crm_node-l-output-change.patch -Patch29: bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch -Patch30: bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch -Patch31: bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch -Patch32: bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch - +Source2: pcsd-bundle-config-1 +Source3: https://rubygems.org/downloads/backports-3.6.8.gem +Source4: https://rubygems.org/downloads/eventmachine-1.2.0.1.gem +Source5: https://rubygems.org/downloads/multi_json-1.12.0.gem +Source6: https://rubygems.org/downloads/open4-1.3.4.gem +Source7: https://rubygems.org/downloads/orderedhash-0.0.6.gem +Source8: https://rubygems.org/downloads/rack-protection-1.5.3.gem +Source9: https://rubygems.org/downloads/rack-test-0.6.3.gem +Source10: https://rubygems.org/downloads/rack-1.6.4.gem +Source11: https://rubygems.org/downloads/rpam-ruby19-1.2.1.gem +Source12: https://rubygems.org/downloads/sinatra-contrib-1.4.7.gem +Source13: https://rubygems.org/downloads/sinatra-1.4.7.gem +Source14: https://rubygems.org/downloads/tilt-2.0.3.gem +Source15: https://github.com/testing-cabal/mock/archive/1.0.1.tar.gz#/mock-1.0.1.tar.gz + +Patch0: bz1315371-01-add-support-for-pacemaker-alerts.patch +Patch1: 
bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch +Patch2: bz1164402-01-sbd-fix-call_node-calls-on-python3.patch +Patch3: bz1346852-01-fix-bad-request-when-resource-removal-t.patch +Patch4: bz1327739-01-add-pcs-quorum-expected-votes-command.patch +Patch5: bz1348579-01-add-a-wrapper-for-holding-SELinux-conte.patch +Patch6: bz1349465-01-allow-to-specify-bash-completion-install-dir.patch +Patch7: fix-qdevice-tests-failing-due-to-multithreading.patch +Patch8: bz1281364-01-gui-add-constraint-colocation-set-support.patch +Patch9: bz1269242-01-fix-displaying-cluster-config-when-cib-is-provided-a.patch +Patch10: bz1353607-01-tests-use-safe-node-names.patch +Patch11: bz1315371-02-use-recipient-id-as-identifier-instead-of-its-value.patch +Patch12: bz1158805-01-cli-improve-quorum-device-commands-syntax.patch +Patch13: bz1289418-01-fixes-for-pcs-cli-running-on-a-remote-node.patch +Patch14: pcsd-fix-syntax-error-on-ruby-1.8.patch +Patch15: bz1348579-02-fix-traceback-when-stopping-pcsd-shortly-after-start.patch +Patch16: bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch +Patch17: bz1357945-01-add-support-for-clufter-s-dist-parameter.patch +Patch18: bz1302010-01-fix-filter-by-property-name-in-pcs-property-show.patch +Patch19: bz1301993-01-improve-node-properties-commands.patch +Patch20: bz1346852-02-web-UI-fix-error-when-removing-resources-takes-long.patch +Patch21: bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch +Patch22: bz1315371-03-improve-alerts-help.patch +Patch23: bz1346852-03-web-UI-correct-handling-of-timeout-when-removing-mul.patch +Patch24: bz1303136-01-fix-check-if-id-exists-in-cib.patch +Patch25: bz1329472-01-when-removing-a-remote-node-remove-it-from-pacemakers-caches-as-well.patch +Patch26: test-fix-an-occasional-multithread-test-fail.patch +Patch27: bz1359154-01-fix-exceptions-when-authenticating-cluster-nodes.patch +Patch28: bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch +Patch29: 
bz1349465-02-install-bash-completion-with-standard-permissions.patch +Patch30: bz1357945-02-doc-fixes-regarding-clufter.patch +Patch31: bz1281391-01-web-UI-add-possibility-to-change-order-of-resources-.patch +Patch32: bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch +Patch33: bz1308514-01-add-booth-support.patch +Patch34: bz1298585-01-add-possibility-to-hide-inactive-resources-in-pcs-resource-show.patch +Patch35: bz1354498-01-handle-exceptions-when-waiting-for-response-from-user.patch +Patch36: bz1346852-04-fix-detecting-nonexisting-resources-in-pcsd.patch +Patch37: bz1164402-02-sbd-fixes.patch +Patch38: bz1315371-04-alerts-related-fixes.patch +Patch39: bz1366307-01-web-ui-fix-bad-using-of-html-ids.patch +Patch40: bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch +Patch41: bz1308514-02-booth-support-improvements.patch +Patch42: bz1164402-03-sbd-fix-check-if-atb-is-required-when-enabling-sbd.patch +Patch43: bz1264360-02-web-ui-change-way-of-displaying-status-of-unmanaged-primitive-resources.patch +Patch44: test-corrections.patch +Patch45: bz1158805-02-add-support-for-qdeviceqnetd-provided-by-corosync.patch +Patch46: bz1308514-03-wider-support-for-booth-configuration-beside-mere.patch +#forgotten patch (chronologically should be before Patch0 +Patch47: fix-pcs-constraint-ticket-set-help.patch +Patch48: bz1305049-01-pcs-does-not-support-ticket-constraints.patch +Patch49: bz1158500-01-add-support-for-utilization-attributes.patch +Patch50: bz1281391-02-web-ui-reset-selected-group-when-displaying-new-resource-dialog.patch +Patch51: bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch +Patch52: bz1158805-03-add-support-for-qdeviceqnetd-provided-by-corosync.patch +Patch53: bz1305049-02-pcs-does-not-support-ticket-constraints.patch +Patch54: rhel7.patch +Patch55: change-cman-to-rhel6-in-messages.patch +Patch56: show-only-warning-when-crm_mon-xml-is-invalid.patch + +BuildRequires: python2-devel python-setuptools 
+BuildRequires: gcc gcc-c++ BuildRequires: ruby >= 2.0.0 ruby-devel rubygems pam-devel git +BuildRequires: rubygem-json BuildRequires: systemd-units rubygem-bundler +BuildRequires: rubygem-minitest +# following for UpdateTimestamps sanitization function +BuildRequires: diffstat +#following BuildRequires are needed by tests +BuildRequires: python-lxml +BuildRequires: corosync +BuildRequires: pacemaker +BuildRequires: pacemaker-cli +BuildRequires: fence-agents-all + Requires(post): systemd Requires(preun): systemd Requires(postun): systemd -Requires: pacemaker-cli corosync ruby >= 2.0.0 pacemaker python-clufter -Requires: psmisc initscripts openssl - -Provides: bundled(rubygem-backports) = 3.6.4 -Provides: bundled(rubygem-eventmachine) = 1.0.7 -Provides: bundled(rubygem-monkey-lib) = 0.5.4 -Provides: bundled(rubygem-multi_json) = 1.11.1 +Requires: corosync pacemaker pacemaker-cli +Requires: psmisc openssl +Requires: python-lxml +Requires: python-setuptools +Requires: python-clufter >= 0.59.0 +Requires: ruby >= 2.0.0 +Requires: rubygem-json + +Provides: bundled(rubygem-backports) = 3.6.8 +Provides: bundled(rubygem-eventmachine) = 1.2.0.1 +Provides: bundled(rubygem-multi_json) = 1.12.0 Provides: bundled(rubygem-open4) = 1.3.4 Provides: bundled(rubygem-orderedhash) = 0.0.6 Provides: bundled(rubygem-rack) = 1.6.4 Provides: bundled(rubygem-rack-protection) = 1.5.3 Provides: bundled(rubygem-rack-test) = 0.6.3 Provides: bundled(rubygem-rpam-ruby19) = 1.2.1 -Provides: bundled(rubygem-sinatra) = 1.4.6 -Provides: bundled(rubygem-sinatra-contrib) = 1.4.4 -Provides: bundled(rubygem-sinatra-sugar) = 0.5.1 -Provides: bundled(rubygem-tilt) = 1.4.1 +Provides: bundled(rubygem-sinatra) = 1.4.7 +Provides: bundled(rubygem-sinatra-contrib) = 1.4.7 +Provides: bundled(rubygem-tilt) = 2.0.3 %description pcs is a corosync and pacemaker configuration tool. It permits users to easily view, modify and created pacemaker based clusters. 
+%define PCS_PREFIX /usr %prep %autosetup -p1 -S git +# -- following borrowed from python-simplejon.el5 -- +# Update timestamps on the files touched by a patch, to avoid non-equal +# .pyc/.pyo files across the multilib peers within a build, where "Level" +# is the patch prefix option (e.g. -p1) +UpdateTimestamps() { + Level=$1 + PatchFile=$2 + # Locate the affected files: + for f in $(diffstat $Level -l $PatchFile); do + # Set the files to have the same timestamp as that of the patch: + touch -r $PatchFile $f + done +} +UpdateTimestamps -p1 %{PATCH0} +UpdateTimestamps -p1 %{PATCH1} +UpdateTimestamps -p1 %{PATCH2} +UpdateTimestamps -p1 %{PATCH3} +UpdateTimestamps -p1 %{PATCH4} +UpdateTimestamps -p1 %{PATCH5} +UpdateTimestamps -p1 %{PATCH6} +UpdateTimestamps -p1 %{PATCH7} +UpdateTimestamps -p1 %{PATCH8} +UpdateTimestamps -p1 %{PATCH9} +UpdateTimestamps -p1 %{PATCH10} +UpdateTimestamps -p1 %{PATCH11} +UpdateTimestamps -p1 %{PATCH12} +UpdateTimestamps -p1 %{PATCH13} +UpdateTimestamps -p1 %{PATCH14} +UpdateTimestamps -p1 %{PATCH15} +UpdateTimestamps -p1 %{PATCH16} +UpdateTimestamps -p1 %{PATCH17} +UpdateTimestamps -p1 %{PATCH18} +UpdateTimestamps -p1 %{PATCH19} +UpdateTimestamps -p1 %{PATCH20} +UpdateTimestamps -p1 %{PATCH21} +UpdateTimestamps -p1 %{PATCH22} +UpdateTimestamps -p1 %{PATCH23} +UpdateTimestamps -p1 %{PATCH24} +UpdateTimestamps -p1 %{PATCH25} +UpdateTimestamps -p1 %{PATCH26} +UpdateTimestamps -p1 %{PATCH27} +UpdateTimestamps -p1 %{PATCH28} +UpdateTimestamps -p1 %{PATCH29} +UpdateTimestamps -p1 %{PATCH30} +UpdateTimestamps -p1 %{PATCH31} +UpdateTimestamps -p1 %{PATCH32} +UpdateTimestamps -p1 %{PATCH33} +UpdateTimestamps -p1 %{PATCH34} +UpdateTimestamps -p1 %{PATCH35} +UpdateTimestamps -p1 %{PATCH36} +UpdateTimestamps -p1 %{PATCH37} +UpdateTimestamps -p1 %{PATCH38} +UpdateTimestamps -p1 %{PATCH39} +UpdateTimestamps -p1 %{PATCH40} +UpdateTimestamps -p1 %{PATCH41} +UpdateTimestamps -p1 %{PATCH42} +UpdateTimestamps -p1 %{PATCH43} +UpdateTimestamps -p1 %{PATCH44} 
+UpdateTimestamps -p1 %{PATCH45} +UpdateTimestamps -p1 %{PATCH46} +UpdateTimestamps -p1 %{PATCH47} +UpdateTimestamps -p1 %{PATCH48} +UpdateTimestamps -p1 %{PATCH49} +UpdateTimestamps -p1 %{PATCH50} +UpdateTimestamps -p1 %{PATCH51} +UpdateTimestamps -p1 %{PATCH52} +UpdateTimestamps -p1 %{PATCH53} +UpdateTimestamps -p1 %{PATCH54} + cp -f %SOURCE1 pcsd/public/images -cp -f %SOURCE2 pcsd/public - + +mkdir -p pcsd/.bundle +cp -f %SOURCE2 pcsd/.bundle/config + +mkdir -p pcsd/vendor/cache +#copy ruby gems +cp -f %SOURCE3 pcsd/vendor/cache +cp -f %SOURCE4 pcsd/vendor/cache +cp -f %SOURCE5 pcsd/vendor/cache +cp -f %SOURCE6 pcsd/vendor/cache +cp -f %SOURCE7 pcsd/vendor/cache +cp -f %SOURCE8 pcsd/vendor/cache +cp -f %SOURCE9 pcsd/vendor/cache +cp -f %SOURCE10 pcsd/vendor/cache +cp -f %SOURCE11 pcsd/vendor/cache +cp -f %SOURCE12 pcsd/vendor/cache +cp -f %SOURCE13 pcsd/vendor/cache +cp -f %SOURCE14 pcsd/vendor/cache +#ruby gems copied + %build %install rm -rf $RPM_BUILD_ROOT pwd -make install DESTDIR=$RPM_BUILD_ROOT PYTHON_SITELIB=%{python_sitelib} -make install_pcsd DESTDIR=$RPM_BUILD_ROOT PYTHON_SITELIB=%{python_sitelib} hdrdir="%{_includedir}" rubyhdrdir="%{_includedir}" includedir="%{_includedir}" -chmod 755 $RPM_BUILD_ROOT/%{python_sitelib}/pcs/pcs.py - -# Temporary fix for ruby-2.0.0 and rpam -#cp $RPM_BUILD_ROOT/usr/lib/pcsd/gemhome/gems/rpam-ruby19-1.2.1/ext/Rpam/rpam_ext.so $RPM_BUILD_ROOT/usr/lib/pcsd/gemhome/gems/rpam-ruby19-1.2.1/lib +make install \ + DESTDIR=$RPM_BUILD_ROOT \ + PYTHON_SITELIB=%{python_sitelib} \ + PREFIX=%{PCS_PREFIX} \ + BASH_COMPLETION_DIR=$RPM_BUILD_ROOT/usr/share/bash-completion/completions +make install_pcsd \ + DESTDIR=$RPM_BUILD_ROOT \ + PYTHON_SITELIB=%{python_sitelib} \ + hdrdir="%{_includedir}" \ + rubyhdrdir="%{_includedir}" \ + includedir="%{_includedir}" \ + PREFIX=%{PCS_PREFIX} + +%check +run_all_tests(){ + #prepare environmet for tests + sitelib=$RPM_BUILD_ROOT%{python_sitelib} + pcsd_dir=$RPM_BUILD_ROOT%{PCS_PREFIX}/lib/pcsd + + 
#run pcs tests and remove them, we do not distribute them in rpm + #python2-mock package is required but is only in epel so we will install it + #manually + #we do not have permissions to write anywhere else than $RPM_BUILD_ROOT + #so we must install python2-mock there + #disabled tests: + #pcs.test.test_lib_external.ParallelCommunicationHelperTest.test_success \ + # File "/builddir/build/BUILDROOT/pcs-0.9.152-5.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/test_lib_external.py", line 865, in test_success + # func.assert_has_calls(expected_calls)... + # Expected: [call(0, a=0), call(1, a=2), call(2, a=4)] + # Actual: [call(1, a=2), call(0, a=0), call(2, a=4)] + # + #pcs.lib.booth.test.test_env.SetKeyfileAccessTest.test_set_desired_file_access \ + # Traceback (most recent call last): + # File "/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/lib/booth/test/test_env.py", line 148, in test_set_desired_file_access + # env.set_keyfile_access(file_path) + # File "/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/lib/booth/env.py", line 63, in set_keyfile_access + # raise report_keyfile_io_error(file_path, "chown", e) + # LibraryError: ERROR FILE_IO_ERROR: {u'reason': u"Operation not permitted: '/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/resources/temp-keyfile'", u'file_role': u'BOOTH_KEY', u'file_path': u'/builddir/build/BUILDROOT/pcs-0.9.152-6.el7.x86_64/usr/lib/python2.7/site-packages/pcs/test/resources/temp-keyfile', u'operation': u'chown'} + + export PYTHONPATH="${PYTHONPATH}:${sitelib}" + easy_install -d ${sitelib} %SOURCE15 + python ${sitelib}/pcs/test/suite.py -v --no-color --all-but \ + pcs.test.test_resource.ResourceTest.testAddResources \ + pcs.test.test_cluster.ClusterTest.testUIDGID \ + pcs.test.test_utils.RunParallelTest.test_wait_for_slower_workers \ + pcs.test.test_stonith.StonithTest.test_stonith_create_provides_unfencing \ + 
pcs.test.test_lib_external.ParallelCommunicationHelperTest.test_success \ + pcs.lib.booth.test.test_env.SetKeyfileAccessTest.test_set_desired_file_access \ + + test_result_python=$? + + find ${sitelib}/pcs -name test -type d -print0|xargs -0 rm -r -v -- + #we installed python2-mock inside $RPM_BUILD_ROOT and now we need to remove + #it because it does not belong into pcs package + #easy_install does not provide uninstall and pip is not an option (is in + #epel) so it must be cleaned manually + rm -v ${sitelib}/easy-install.pth + rm -v ${sitelib}/mock-1.0.1-py2.7.egg + rm -v ${sitelib}/site.py + rm -v ${sitelib}/site.pyc + + + #run pcsd tests and remove them + GEM_HOME=${pcsd_dir}/vendor/bundle/ruby ruby \ + -I${pcsd_dir} \ + -I${pcsd_dir}/test \ + ${pcsd_dir}/test/test_all_suite.rb + test_result_ruby=$? + #remove tests after use here to be symmetrical with pcs tests + rm -r -v ${pcsd_dir}/test + + if [ $test_result_python -ne 0 ]; then + return $test_result_python + fi + return $test_result_ruby +} + +run_all_tests %post %systemd_post pcsd.service @@ -99,27 +316,171 @@ chmod 755 $RPM_BUILD_ROOT/%{python_sitelib}/pcs/pcs.py %systemd_postun_with_restart pcsd.service %files -%defattr(-,root,root,-) %{python_sitelib}/pcs %{python_sitelib}/pcs-%{version}-py2.*.egg-info /usr/sbin/pcs /usr/lib/pcsd/* /usr/lib/pcsd/.bundle/config /usr/lib/systemd/system/pcsd.service +/usr/share/bash-completion/completions/pcs /var/lib/pcsd /etc/pam.d/pcsd -/etc/bash_completion.d/pcs /etc/logrotate.d/pcsd %dir /var/log/pcsd -/etc/sysconfig/pcsd +%config(noreplace) /etc/sysconfig/pcsd +%ghost %config(noreplace) /var/lib/pcsd/cfgsync_ctl +%ghost %config(noreplace) /var/lib/pcsd/pcsd.cookiesecret +%ghost %config(noreplace) /var/lib/pcsd/pcsd.crt +%ghost %config(noreplace) /var/lib/pcsd/pcsd.key +%ghost %config(noreplace) /var/lib/pcsd/pcs_settings.conf +%ghost %config(noreplace) /var/lib/pcsd/pcs_users.conf +%ghost %config(noreplace) /var/lib/pcsd/tokens %{_mandir}/man8/pcs.* %exclude 
/usr/lib/pcsd/*.debian +%exclude /usr/lib/pcsd/pcsd.service +%exclude /usr/lib/pcsd/pcsd.conf +%exclude %{python_sitelib}/pcs/bash_completion.sh +%exclude %{python_sitelib}/pcs/pcs.8 +%exclude %{python_sitelib}/pcs/pcs %doc COPYING README %changelog -* Tue Aug 23 2016 Johnny Hughes <johnny@centos.org> - 0.9.143-15.el7.centos -- Roll in CentOS Branding (centos bug #9426) +* Tue Sep 20 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-10 +- Fixed error when stopping qdevice if it is not running +- Fixed removing qdevice from a cluster +- Fixed documentation regarding booth +- Fixed return code when no matching ticket constraint found during remove +- Resolves: rhbz#1158805 rhbz#1305049 + +* Wed Sep 14 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-9 +- Added warning when stopping/destroying qdevice instance which is being used +- Fixed removing qdevice from a cluster which uses sbd +- Fixed re-running "pcs cluster node add" if it failed due to qdevice +- Fixed documentation regarding booth +- Added warning when using unknown booth ticket option +- Added constraint ticket remove command +- Fixed return code and message when displaying node utilization for nonexistent node +- Fixed setting utilization attributes in web UI +- Fixed support for node utilization on remote node +- Fixed updating of selected group when displaying new resource dialog +- Fixed group list when managing cluster running older pcs in web UI +- Fixed displaying unmanaged status for resources for older pcs in web UI +- Fixed clone/master/unclone group/ungroup buttons for older pcs in web UI +- Fixed node standby/unstandby for older pcs in web UI +- Resolves: rhbz#1158805 rhbz#1308514 rhbz#1305049 rhbz#1158500 rhbz#1231858 + +* Wed Aug 31 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-8 +- Fixed error message in node maintenance/unmaintenance commands +- Fixed missing line at the end of booth config +- Fixed documentation regarding booth +- Fixed remove multiple booth resources with "--force" flag +- Fixed 
cleanup of ip resource if it fails to create booth resource +- Added bash completion for booth +- Fixed display full booth configuration +- Added ability to display booth config from remote node +- Added support for ticket options during adding booth ticket +- Fixed adding node to cluster when booth is not installed +- Added restart command for booth +- Fixed check if auto_tie_breaker is required when enabling sbd +- Improved way of displaying status of unmanaged primitive resources in web UI +- Resolves: rhbz#1247088 rhbz#1308514 rhbz#1164402 rhbz#1264360 + +* Fri Aug 19 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-7 +- Added possibility to hide inactive resources in "pcs resource show" command +- Fixed exceptions handling when waiting for response from user in command line +- Fixed nonexisting resource detection in pcsd +- Fixed SBD_WATCHDOG_TIMEOUT option value validation +- Removed possibility to change SBD_PACEMAKER +- Fixed exception when disabling service on systemd systems +- Added automatic auto_tie_breaker quorum option set whenever it is needed for SBD to work +- Fixed setting sbd watchdog in config +- Fixed error handling when upgrading cib schema +- Improved consistency of syntax 'pcs alert recipient add' command +- Resolves: rhbz#1298585 rhbz#1354498 rhbz#1346852 rhbz#1164402 rhbz#1315371 rhbz#1366307 + +* Fri Aug 05 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-6 +- Fixed documentation regarding clufter +- Added possibility to change order of resources in a group in web UI +- Added support for unmanaged resources in web UI +- Added support for booth (cluster ticket manager) +- Resolves: rhbz#1357945 rhbz#1281391 rhbz#1264360 rhbz#1308514 + +* Thu Jul 28 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-5 +- Fixed traceback when stopping pcsd shortly after start +- Fixed removing a dead node from a cluster +- Added support for clufter's 'dist' parameter +- Fixed filtering by property name in "pcs property show" +- Fixed an error in web UI when removing 
resources takes a long time +- Fixed occasional missing optional arguments of resources in web UI +- Improved help for alerts +- Fixed recreating a remote node resource +- Fixed exceptions when authenticating cluster nodes +- Fixed permissions for bash completion file +- Resolves: rhbz#1348579 rhbz#1225423 rhbz#1357945 rhbz#1302010 rhbz#1301993 rhbz#1346852 rhbz#1231858 rhbz#1315371 rhbz#1303136 rhbz#1329472 rhbz#1359154 rhbz#1349465 + +* Fri Jul 15 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-4 +- Added colocation constraint support in web UI +- Fixed displaying cluster config when cib is provided as a file +- Removed side effect on /etc/hosts during build +- Recipient id is used as identifier in alarms +- Improved quorum device commands syntax +- Fixed pcs client for running on a remote node +- Resolves: rhbz#1281364 rhbz#1269242 rhbz#1353607 rhbz#1315371 rhbz#1158805 rhbz#1289418 + +* Fri Jul 01 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-3 +- Added support for pacemaker alerts +- Added support for qdevice/qnetd provided by corosync +- Fixed sbd calls on python3 +- Fixed bad request when resource removal takes longer than pcs expects +- Added support for set expected votes on a live cluster +- Added a wrapper for holding SELinux context when pcsd is started by systemd +- Resolves: rhbz#1315371 rhbz#1158805 rhbz#1164402 rhbz#1346852 rhbz#1327739 rhbz#1348579 rhbz#1349465 + +* Wed Jun 22 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-2 +- Specified achitectures matching with pacemaker and corosync +- Resolves: rhbz#1299847 + +* Tue Jun 21 2016 Ivan Devat <idevat@redhat.com> - 0.9.152-1 +- Rebased to latest upstream sources +- Added support for sbd configuration +- Added support for constraint tickets in web UI +- Added warning to pcs quorum unblock command +- Fixes in manpage and built-in help +- Config files marked as config +- Resolves: rhbz#1299847 rhbz#1164402 rhbz#1305049 rhbz#1264566 rhbz#1225946 rhbz#1231858 rhbz#1328066 rhbz#1341114 + +* Fri Jun 03 2016 
Ivan Devat <idevat@redhat.com> - 0.9.151-2 +- Added missing requirements for python-setuptools +- Resolves: rhbz#1299847 + +* Tue May 31 2016 Ivan Devat <idevat@redhat.com> - 0.9.151-1 +- Rebased to latest upstream sources +- Added support for utilization attributes +- Optimized pcs status command +- Fixes in manpage and built-in help +- Improved resource cleanups +- Added --wait support for cluster start and node standby commands +- Improved resource and fence agent options in web UI +- Added ability to put a node into maintenance mode +- Fixed adding acl permission when targed id does not exists +- Fixed deleting resource when referenced in acl +- Improved pcsd launch script +- Added automatically setting provides=unfencing meta attribute for stonith device +- Improved Cluster Properties page in web UI +- Fixed page update after adding group in web UI +- Fixed deleting group (clones) when managing older cluster in web UI +- Fixed stonith update command when fence agents fails to get metadata +- Added support for putting Pacemaker Remote nodes into standby +- Added support for omission stopped resources in status command +- Added login input sanitization in web UI +- Added config settings for SSL options and ciphers +- Improved resource update command to inform user about missused op settings +- Spec file fixes +- Added support for constraint tickets from command line +- Fixed CVE-2016-0720 pcs: Cross-Site Request Forgery in web UI +- Fixed CVE-2016-0721 pcs: cookies are not invalidated upon logout +- Resolves: rhbz#1299847 rhbz#1158500 rhbz#1207405 rhbz#1219581 rhbz#1225946 rhbz#1220512 rhbz#1229822 rhbz#1231858 rhbz#1247088 rhbz#1248990 rhbz#1249085 rhbz#1252050 rhbz#1262773 rhbz#1281371 rhbz#1283562 rhbz#1286664 rhbz#1287320 rhbz#1290512 rhbz#1298585 rhbz#1305786 rhbz#1315652 rhbz#1321021 rhbz#1315743 rhbz#1315357 rhbz#1305049 rhbz#1335779 rhbz#1330884 * Wed Oct 21 2015 Tomas Jelinek <tojeline@redhat.com> - 0.9.143-15 - Fixed setting cluster properties in web UI