diff --git a/.gitignore b/.gitignore
index 09fb27e..f460c9e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,15 @@
 SOURCES/HAM-logo.png
-SOURCES/pcs-withgems-0.9.143.tar.gz
+SOURCES/backports-3.6.8.gem
+SOURCES/eventmachine-1.2.0.1.gem
+SOURCES/mock-1.0.1.tar.gz
+SOURCES/multi_json-1.12.0.gem
+SOURCES/open4-1.3.4.gem
+SOURCES/orderedhash-0.0.6.gem
+SOURCES/pcs-0.9.152.tar.gz
+SOURCES/rack-1.6.4.gem
+SOURCES/rack-protection-1.5.3.gem
+SOURCES/rack-test-0.6.3.gem
+SOURCES/rpam-ruby19-1.2.1.gem
+SOURCES/sinatra-1.4.7.gem
+SOURCES/sinatra-contrib-1.4.7.gem
+SOURCES/tilt-2.0.3.gem
diff --git a/.pcs.metadata b/.pcs.metadata
index 2a2ec1f..68b2d71 100644
--- a/.pcs.metadata
+++ b/.pcs.metadata
@@ -1,3 +1,15 @@
-9c06bb646aba6330d4d85fe08415cdd2276fe918 SOURCES/HAM-logo.png
-062c9973625dced9a54a2f83a7baf7696ac37d60 SOURCES/favicon.ico
-f4cfd8dd9ffdc4ce13a9b6946008ded2e1676709 SOURCES/pcs-withgems-0.9.143.tar.gz
+80dc7788a3468fb7dd362a4b8bedd9efb373de89 SOURCES/HAM-logo.png
+5c9dd0d5552d242ee6bb338a9097e85f0a0a45d5 SOURCES/backports-3.6.8.gem
+60b6f1d8391cd374c6a2ef3977cb1397ed89055a SOURCES/eventmachine-1.2.0.1.gem
+baa3446eb63557a24c4522dc5a61cfad082fa395 SOURCES/mock-1.0.1.tar.gz
+46156f5a4ff17a23c15d0d2f0fc84cb5627ac70d SOURCES/multi_json-1.12.0.gem
+41a7fe9f8e3e02da5ae76c821b89c5b376a97746 SOURCES/open4-1.3.4.gem
+709cc95025009e5d221e37cb0777e98582146809 SOURCES/orderedhash-0.0.6.gem
+2808df782cd1d269e1d94c36a52573023128c0a0 SOURCES/pcs-0.9.152.tar.gz
+0a1eea6d7bb903d8c075688534480e87d4151470 SOURCES/rack-1.6.4.gem
+1c28529c1d7376c61faed80f3d3297905a14c2b3 SOURCES/rack-protection-1.5.3.gem
+6fd5a7f881a65ef93b66e21556ef67fbe08a2fcc SOURCES/rack-test-0.6.3.gem
+a90e5a60d99445404a3c29a66d953a5e9918976d SOURCES/rpam-ruby19-1.2.1.gem
+1c7f1ad8af670f4990373ebddb4d9fecd8f3c7d1 SOURCES/sinatra-1.4.7.gem
+83742328f21b684d6ce6c4747710c6e975b608e7 SOURCES/sinatra-contrib-1.4.7.gem
+49bee6e8614c1e991c1156150b0a2eaa28868f8d SOURCES/tilt-2.0.3.gem
diff --git a/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch b/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch
deleted file mode 100644
index e76425e..0000000
--- a/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From a4fa532d6c1091caf94d64c95c5625738aa1ebf3 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek
-Date: Wed, 12 Aug 2015 13:36:27 +0200
-Subject: [PATCH] fix resource relocation of globally-unique clones
-
----
- pcs/test/test_utils.py | 57 +++++++++++++++++++++++++++++++++++++++++++++++---
- pcs/utils.py | 9 +++++++-
- 2 files changed, 62 insertions(+), 4 deletions(-)
-
-diff --git a/pcs/utils.py b/pcs/utils.py
-index d61ff44..740ff04 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -2044,8 +2044,15 @@ def get_resources_location_from_operations(cib_dom, resources_operations):
-             continue
-         long_id = res_op["long_id"]
-         if long_id not in locations:
-+            # Move clone instances as if they were non-cloned resources, it
-+            # really works with current pacemaker (1.1.13-6). Otherwise there
-+            # is probably no way to move them other then setting their
-+            # stickiness to 0.
-+            res_id = res_op["id"]
-+            if ":" in res_id:
-+                res_id = res_id.split(":")[0]
-             id_for_constraint = validate_constraint_resource(
--                cib_dom, res_op["id"]
-+                cib_dom, res_id
-             )[2]
-             if not id_for_constraint:
-                 continue
----
-1.9.1
-
diff --git a/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch b/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch
deleted file mode 100644
index adca66c..0000000
--- a/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From 5f6b6c657f2a88985baf02d24a2de8dafa8ec736 Mon Sep 17 00:00:00 2001
-From: Tomas Jelinek
-Date: Tue, 25 Aug 2015 13:08:46 +0200
-Subject: [PATCH] fix resource relocate for remote nodes
-
----
- pcs/test/test_utils.py | 69 +++++++++++++++++++++++++++
- pcs/test/transitions02.xml | 116 +++++++++++++++++++++++++++++++++++++++++++++
- pcs/utils.py | 8 ++--
- 3 files changed, 190 insertions(+), 3 deletions(-)
- create mode 100644 pcs/test/transitions02.xml
-
-diff --git a/pcs/utils.py b/pcs/utils.py
-index 740ff04..cd33a27 100644
---- a/pcs/utils.py
-+++ b/pcs/utils.py
-@@ -2014,7 +2014,9 @@ def simulate_cib(cib_dom):
- 
- def get_operations_from_transitions(transitions_dom):
-     operation_list = []
--    watched_operations = ("start", "stop", "promote", "demote")
-+    watched_operations = (
-+        "start", "stop", "promote", "demote", "migrate_from", "migrate_to"
-+    )
-     for rsc_op in transitions_dom.getElementsByTagName("rsc_op"):
-         primitives = rsc_op.getElementsByTagName("primitive")
-         if not primitives:
-@@ -2040,7 +2042,7 @@ def get_resources_location_from_operations(cib_dom, resources_operations):
-     locations = {}
-     for res_op in resources_operations:
-         operation = res_op["operation"]
--        if operation not in ("start", "promote"):
-+        if operation not in ("start", "promote", "migrate_from"):
-             continue
-         long_id = res_op["long_id"]
-         if long_id not in locations:
-@@ -2061,7 +2063,7 @@ def get_resources_location_from_operations(cib_dom, resources_operations):
-                 "long_id": long_id,
-                 "id_for_constraint": id_for_constraint,
-             }
--            if operation == "start":
-+            if operation in ("start", "migrate_from"):
-                 locations[long_id]["start_on_node"] = res_op["on_node"]
-             if operation == "promote":
-                 locations[long_id]["promote_on_node"] = res_op["on_node"]
----
-1.9.1
-
diff --git a/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch b/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch
new file mode 100644
index 0000000..4907fad
--- /dev/null
+++ b/SOURCES/bz1158500-01-add-support-for-utilization-attributes.patch
@@ -0,0 +1,357 @@
+From 1b6ed4d97198e7ca8c1fd5f76bfb8bfc95eeabdc Mon Sep 17 00:00:00 2001
+From: Ivan Devat
+Date: Wed, 14 Sep 2016 09:37:06 +0200
+Subject: [PATCH] squash bz1158500 add support for utilization attri
+
+4ab84628f802 fix parsing of utilization attributes
+
+18d526f59679 support utilization on (non-cib) remote node
+
+f0b193a681e3 show error when show utilizat.
on nonexistent node + +9907c123c225 web UI: fix setting utilization attributes +--- + .pylintrc | 2 +- + pcs/node.py | 54 ++++++++++++++++++++++++++++++++++++----- + pcs/resource.py | 8 ++++--- + pcs/test/test_node.py | 56 +++++++++++++++++++++++++++++++++++++++++++ + pcs/test/test_resource.py | 18 ++++++++++++++ + pcs/test/test_utils.py | 17 +++++++++---- + pcs/utils.py | 12 +++++++++- + pcsd/public/js/nodes-ember.js | 4 ++-- + pcsd/remote.rb | 2 +- + 9 files changed, 155 insertions(+), 18 deletions(-) + +diff --git a/.pylintrc b/.pylintrc +index 1dd6d5d..6101381 100644 +--- a/.pylintrc ++++ b/.pylintrc +@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy + + [FORMAT] + # Maximum number of lines in a module +-max-module-lines=4584 ++max-module-lines=4616 + # Maximum number of characters on a single line. + max-line-length=1291 + +diff --git a/pcs/node.py b/pcs/node.py +index ed77d5d..729ea35 100644 +--- a/pcs/node.py ++++ b/pcs/node.py +@@ -56,7 +56,10 @@ def node_cmd(argv): + elif len(argv) == 1: + print_node_utilization(argv.pop(0), filter_name=filter_name) + else: +- set_node_utilization(argv.pop(0), argv) ++ try: ++ set_node_utilization(argv.pop(0), argv) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "node", "utilization") + # pcs-to-pcsd use only + elif sub_cmd == "pacemaker-status": + node_pacemaker_status() +@@ -150,17 +153,56 @@ def set_node_utilization(node, argv): + cib = utils.get_cib_dom() + node_el = utils.dom_get_node(cib, node) + if node_el is None: +- utils.err("Unable to find a node: {0}".format(node)) ++ if utils.usefile: ++ utils.err("Unable to find a node: {0}".format(node)) + +- utils.dom_update_utilization( +- node_el, utils.convert_args_to_tuples(argv), "nodes-" +- ) ++ for attrs in utils.getNodeAttributesFromPacemaker(): ++ if attrs.name == node and attrs.type == "remote": ++ node_attrs = attrs ++ break ++ else: ++ utils.err("Unable to find a node: {0}".format(node)) ++ ++ nodes_section_list = cib.getElementsByTagName("nodes") ++ if len(nodes_section_list) == 0: ++ utils.err("Unable to get nodes section of cib") ++ ++ dom = nodes_section_list[0].ownerDocument ++ node_el = dom.createElement("node") ++ node_el.setAttribute("id", node_attrs.id) ++ node_el.setAttribute("type", node_attrs.type) ++ node_el.setAttribute("uname", node_attrs.name) ++ nodes_section_list[0].appendChild(node_el) ++ ++ utils.dom_update_utilization(node_el, prepare_options(argv), "nodes-") + utils.replace_cib_configuration(cib) + + def print_node_utilization(filter_node=None, filter_name=None): + cib = utils.get_cib_dom() ++ ++ node_element_list = cib.getElementsByTagName("node") ++ ++ ++ if( ++ filter_node ++ and ++ filter_node not in [ ++ node_element.getAttribute("uname") ++ for node_element in node_element_list ++ ] ++ and ( ++ utils.usefile ++ or ++ filter_node not in [ ++ node_attrs.name for node_attrs ++ in utils.getNodeAttributesFromPacemaker() ++ ] ++ ) ++ ): ++ utils.err("Unable to find a node: {0}".format(filter_node)) ++ + utilization = {} +- for node_el in cib.getElementsByTagName("node"): ++ for node_el in node_element_list: + node = node_el.getAttribute("uname") + if filter_node is not None and node != filter_node: + continue +diff --git a/pcs/resource.py b/pcs/resource.py +index 74adac6..046a826 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -191,7 +191,10 @@ def resource_cmd(argv): + elif len(argv) == 1: + print_resource_utilization(argv.pop(0)) + else: +- set_resource_utilization(argv.pop(0), argv) ++ try: ++ 
set_resource_utilization(argv.pop(0), argv) ++ except CmdLineInputError as e: ++ utils.exit_on_cmdline_input_errror(e, "resource", "utilization") + elif (sub_cmd == "get_resource_agent_info"): + get_resource_agent_info(argv) + else: +@@ -2795,8 +2798,7 @@ def set_resource_utilization(resource_id, argv): + resource_el = utils.dom_get_resource(cib, resource_id) + if resource_el is None: + utils.err("Unable to find a resource: {0}".format(resource_id)) +- +- utils.dom_update_utilization(resource_el, utils.convert_args_to_tuples(argv)) ++ utils.dom_update_utilization(resource_el, prepare_options(argv)) + utils.replace_cib_configuration(cib) + + def print_resource_utilization(resource_id): +diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py +index 9b45e07..137c7c7 100644 +--- a/pcs/test/test_node.py ++++ b/pcs/test/test_node.py +@@ -7,7 +7,9 @@ from __future__ import ( + + import shutil + from pcs.test.tools import pcs_unittest as unittest ++from pcs.test.tools.pcs_unittest import mock + ++from pcs import node + from pcs.test.tools.assertions import AssertPcsMixin + from pcs.test.tools.misc import ( + ac, +@@ -268,6 +270,20 @@ Node Utilization: + self.assertEqual(0, returnVal) + + def test_node_utilization_set_invalid(self): ++ output, returnVal = pcs(temp_cib, "node utilization rh7-1 test") ++ expected_out = """\ ++Error: missing value of 'test' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ ++ output, returnVal = pcs(temp_cib, "node utilization rh7-1 =10") ++ expected_out = """\ ++Error: missing key in '=10' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ + output, returnVal = pcs(temp_cib, "node utilization rh7-0 test=10") + expected_out = """\ + Error: Unable to find a node: rh7-0 +@@ -524,3 +540,43 @@ Node Attributes: + "node attribute rh7-1 missing= --force", + "" + ) ++ ++class SetNodeUtilizationTest(unittest.TestCase, AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(empty_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ def test_refuse_non_option_attribute_parameter_among_options(self): ++ self.assert_pcs_fail("node utilization rh7-1 net", [ ++ "Error: missing value of 'net' option", ++ ]) ++ ++ def test_refuse_option_without_key(self): ++ self.assert_pcs_fail("node utilization rh7-1 =1", [ ++ "Error: missing key in '=1' option", ++ ]) ++ ++class PrintNodeUtilizationTest(unittest.TestCase, AssertPcsMixin): ++ def setUp(self): ++ shutil.copy(empty_cib, temp_cib) ++ self.pcs_runner = PcsRunner(temp_cib) ++ ++ @mock.patch("pcs.node.utils") ++ def test_refuse_when_node_not_in_cib_and_is_not_remote(self, mock_utils): ++ mock_cib = mock.MagicMock() ++ mock_cib.getElementsByTagName = mock.Mock(return_value=[]) ++ ++ mock_utils.get_cib_dom = mock.Mock(return_value=mock_cib) ++ mock_utils.usefile = False ++ mock_utils.getNodeAttributesFromPacemaker = mock.Mock(return_value=[]) ++ mock_utils.err = mock.Mock(side_effect=SystemExit) ++ ++ self.assertRaises( ++ SystemExit, ++ lambda: node.print_node_utilization("some") ++ ) ++ ++ def test_refuse_when_node_not_in_mocked_cib(self): ++ self.assert_pcs_fail("node utilization some_nonexistent_node", [ ++ "Error: Unable to find a node: some_nonexistent_node", ++ ]) +diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py +index 87a7fa8..d32cfb4 100644 +--- a/pcs/test/test_resource.py ++++ b/pcs/test/test_resource.py +@@ -4430,6 +4430,24 @@ Resource Utilization: + self.assertEqual(0, returnVal) + + def test_resource_utilization_set_invalid(self): ++ output, 
returnVal = pcs( ++ temp_large_cib, "resource utilization dummy test" ++ ) ++ expected_out = """\ ++Error: missing value of 'test' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ ++ output, returnVal = pcs( ++ temp_large_cib, "resource utilization dummy =10" ++ ) ++ expected_out = """\ ++Error: missing key in '=10' option ++""" ++ ac(expected_out, output) ++ self.assertEqual(1, returnVal) ++ + output, returnVal = pcs(temp_large_cib, "resource utilization dummy0") + expected_out = """\ + Error: Unable to find a resource: dummy0 +diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py +index 252de30..c4c6d87 100644 +--- a/pcs/test/test_utils.py ++++ b/pcs/test/test_utils.py +@@ -1400,12 +1400,12 @@ class UtilsTest(unittest.TestCase): + """).documentElement + self.assertRaises( + SystemExit, +- utils.dom_update_utilization, el, [("name", "invalid_val")] ++ utils.dom_update_utilization, el, {"name": "invalid_val"} + ) + + self.assertRaises( + SystemExit, +- utils.dom_update_utilization, el, [("name", "0.01")] ++ utils.dom_update_utilization, el, {"name": "0.01"} + ) + + sys.stderr = tmp_stderr +@@ -1415,7 +1415,12 @@ class UtilsTest(unittest.TestCase): + + """).documentElement + utils.dom_update_utilization( +- el, [("name", ""), ("key", "-1"), ("keys", "90")] ++ el, ++ { ++ "name": "", ++ "key": "-1", ++ "keys": "90", ++ } + ) + + self.assertEqual(len(dom_get_child_elements(el)), 1) +@@ -1459,7 +1464,11 @@ class UtilsTest(unittest.TestCase): + + """).documentElement + utils.dom_update_utilization( +- el, [("key", "100"), ("keys", "")] ++ el, ++ { ++ "key": "100", ++ "keys": "", ++ } + ) + + u = dom_get_child_elements(el)[0] +diff --git a/pcs/utils.py b/pcs/utils.py +index a7ff7ca..d5b6dcf 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -472,6 +472,16 @@ def getNodesFromPacemaker(): + except LibraryError as e: + process_library_reports(e.args) + ++def getNodeAttributesFromPacemaker(): ++ try: ++ return [ ++ node.attrs ++ for node in ClusterState(getClusterStateXml()).node_section.nodes ++ ] ++ except LibraryError as e: ++ process_library_reports(e.args) ++ ++ + def hasCorosyncConf(conf=None): + if not conf: + if is_rhel6(): +@@ -2487,7 +2497,7 @@ def dom_update_utilization(dom_element, attributes, id_prefix=""): + id_prefix + dom_element.getAttribute("id") + "-utilization" + ) + +- for name, value in attributes: ++ for name, value in sorted(attributes.items()): + if value != "" and not is_int(value): + err( + "Value of utilization attribute must be integer: " +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index c650fe6..19caf14 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -500,9 +500,9 @@ Pcs.UtilizationTableComponent = Ember.Component.extend({ + }, + add: function(form_id) { + var id = "#" + form_id; +- var name = $(id + " input[name='new_utilization_name']").val(); ++ var name = $(id + " input[name='new_utilization_name']").val().trim(); + if (name == "") { +- return; ++ alert("Name of utilization attribute should be non-empty string."); + } + var value = $(id + " input[name='new_utilization_value']").val().trim(); + if (!is_integer(value)) { +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index e467d0a..7dc7951 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -2240,7 +2240,7 @@ def set_node_utilization(params, reqest, auth_user) + + if retval != 0 + return [400, "Unable to set utilization '#{name}=#{value}' for node " + +- "'#{res_id}': #{stderr.join('')}" ++ "'#{node}': 
#{stderr.join('')}" + ] + end + return 200 +-- +1.8.3.1 + diff --git a/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch b/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch deleted file mode 100644 index baded2f..0000000 --- a/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch +++ /dev/null @@ -1,396 +0,0 @@ -From ef01aa872871b8e1ea79058cbe3301ce878dde9a Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Tue, 25 Aug 2015 11:44:00 +0200 -Subject: [PATCH] fix dashboard in web UI - ---- - pcsd/cluster_entity.rb | 53 +++++++++++++++++++++++++++++----------- - pcsd/pcs.rb | 14 ++++++++--- - pcsd/public/js/nodes-ember.js | 17 ++++++++++--- - pcsd/public/js/pcsd.js | 38 ++++++++++++++-------------- - pcsd/remote.rb | 22 +++++++++++++++-- - pcsd/test/test_cluster_entity.rb | 4 +-- - pcsd/views/_resource.erb | 20 +++++++-------- - pcsd/views/main.erb | 4 +++ - 8 files changed, 117 insertions(+), 55 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index b291937..78bc5ab 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -112,6 +112,9 @@ module ClusterEntity - status.node = node - primitive.crm_status << status - } -+ primitives.each {|_, resource| -+ resource[0].update_status -+ } - return primitives - end - -@@ -178,6 +181,9 @@ module ClusterEntity - end - end - } -+ tree.each {|resource| -+ resource.update_status -+ } - return tree - end - -@@ -491,23 +497,27 @@ module ClusterEntity - end - end - -+ def update_status -+ @status = get_status -+ end -+ - def get_status -- count = @crm_status.length - running = 0 -+ failed = 0 - @crm_status.each do |s| -- if ['Started', 'Master', 'Slave'].include?(s.role) -+ if s.active - running += 1 -+ elsif s.failed -+ failed += 1 - end - end - - if disabled? - status = ClusterEntity::ResourceStatus.new(:disabled) -- elsif running != 0 -- if running == count -- status = ClusterEntity::ResourceStatus.new(:running) -- else -- status = ClusterEntity::ResourceStatus.new(:partially_running) -- end -+ elsif running > 0 -+ status = ClusterEntity::ResourceStatus.new(:running) -+ elsif failed > 0 -+ status = ClusterEntity::ResourceStatus.new(:failed) - else - status = ClusterEntity::ResourceStatus.new(:blocked) - end -@@ -655,6 +665,14 @@ module ClusterEntity - end - end - -+ def update_status -+ @status = ClusterEntity::ResourceStatus.new(:running) -+ @members.each { |p| -+ p.update_status -+ @status = p.status if @status < p.status -+ } -+ end -+ - def to_status(version='1') - if version == '2' - hash = super(version) -@@ -730,6 +748,13 @@ module ClusterEntity - end - end - -+ def update_status -+ if @member -+ @member.update_status -+ @status = @member.status -+ end -+ end -+ - def to_status(version='1') - if version == '2' - hash = super(version) -@@ -794,13 +819,13 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- end -- if @masters.empty? -- @error_list << { -- :message => 'Resource is master/slave but has not been promoted '\ -+ if @masters.empty? 
-+ @error_list << { -+ :message => 'Resource is master/slave but has not been promoted '\ - + 'to master on any node.', -- :type => 'no_master' -- } -+ :type => 'no_master' -+ } -+ end - end - end - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 1fe9b99..cc5b038 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -1506,10 +1506,18 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - status = overview.update(cluster_nodes_map[quorate_nodes[0]]) - status[:quorate] = true - status[:node_list] = node_status_list -- # if we don't have quorum, use data from any node -- # no node has quorum, so no node has any info about the cluster -+ # if we don't have quorum, use data from any online node, -+ # otherwise use data from any node no node has quorum, so no node has any -+ # info about the cluster - elsif not old_status -- status = overview.update(cluster_nodes_map.values[0]) -+ node_to_use = cluster_nodes_map.values[0] -+ cluster_nodes_map.each { |_, node_data| -+ if node_data[:node] and node_data[:node][:status] == 'online' -+ node_to_use = node_data -+ break -+ end -+ } -+ status = overview.update(node_to_use) - status[:quorate] = false - status[:node_list] = node_status_list - # old pcsd doesn't provide info about quorum, use data from any node -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 1f60adc..172c00a 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -54,7 +54,8 @@ Pcs = Ember.Application.createWithMixins({ - if (window.location.pathname.lastIndexOf('/manage', 0) !== 0) { - return; - } -- clearTimeout(Pcs.update_timeout); -+ clearTimeout(Pcs.get('update_timeout')); -+ Pcs.set('update_timeout', null); - var self = Pcs; - var cluster_name = self.cluster_name; - if (cluster_name == null) { -@@ -77,7 +78,7 @@ Pcs = Ember.Application.createWithMixins({ - if (data["not_current_data"]) { - self.update(); - } else { -- Pcs.update_timeout = window.setTimeout(self.update, 20000); -+ Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - } - hide_loading_screen(); - }, -@@ -92,7 +93,7 @@ Pcs = Ember.Application.createWithMixins({ - console.log("Error: Unable to parse json for clusters_overview"); - } - } -- Pcs.update_timeout = window.setTimeout(self.update,20000); -+ Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - hide_loading_screen(); - } - }); -@@ -126,6 +127,7 @@ Pcs = Ember.Application.createWithMixins({ - var cur_resource = self.get('cur_resource'); - var resource_map = self.get('resource_map'); - if (first_run) { -+ setup_node_links(); - Pcs.nodesController.load_node($('#node_list_row').find('.node_selected').first(),true); - Pcs.aclsController.load_role($('#acls_list_row').find('.node_selected').first(), true); - if (self.get("fence_id_to_load")) { -@@ -173,7 +175,6 @@ Pcs = Ember.Application.createWithMixins({ - if (!resource_change && self.get('cur_resource')) - tree_view_select(self.get('cur_resource').get('id')); - Pcs.selectedNodeController.reset(); -- setup_node_links(); - disable_checkbox_clicks(); - }); - }); -@@ -207,6 +208,7 @@ Pcs.resourcesContainer = Ember.Object.create({ - cur_fence: null, - constraints: {}, - group_list: [], -+ data_version: null, - - get_resource_by_id: function(resource_id) { - var resource_map = this.get('resource_map'); -@@ -434,6 +436,7 @@ Pcs.resourcesContainer = Ember.Object.create({ - update: function(data) { - var self = this; - self.set('group_list', data['groups']); -+ self.set("data_version", data['status_version']); - var 
resources = data["resource_list"]; - var resource_obj = null; - var resource_id; -@@ -495,6 +498,12 @@ Pcs.resourcesContainer = Ember.Object.create({ - } - }); - -+Pcs.resourcesContainer.reopen({ -+ is_version_1: function() { -+ return (this.get("data_version") == '1'); -+ }.property('data_version') -+}); -+ - Pcs.ResourceObj = Ember.Object.extend({ - id: null, - _id: Ember.computed.alias('id'), -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 9891aa8..2c71e6b 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -1242,26 +1242,24 @@ function destroy_tooltips() { - } - - function remove_cluster(ids) { -- for (var i=0; i cluster.name, -+ 'error_list' => [ -+ {'message' => 'Unable to connect to the cluster. Request timeout.'} -+ ], -+ 'warning_list' => [], -+ 'status' => 'unknown', -+ 'node_list' => get_default_overview_node_list(cluster.name), -+ 'resource_list' => [] -+ } - overview_cluster = nil - online, offline, not_authorized_nodes = check_gui_status_of_nodes( - session, -@@ -1134,7 +1145,7 @@ def clusters_overview(params, request, session) - nodes_not_in_cluster = [] - for node in cluster_nodes_auth - code, response = send_request_with_token( -- session, node, 'cluster_status', true, {}, true, nil, 15 -+ session, node, 'cluster_status', true, {}, true, nil, 8 - ) - if code == 404 - not_supported = true -@@ -1228,7 +1239,14 @@ def clusters_overview(params, request, session) - cluster_map[cluster.name] = overview_cluster - } - } -- threads.each { |t| t.join } -+ -+ begin -+ Timeout::timeout(18) { -+ threads.each { |t| t.join } -+ } -+ rescue Timeout::Error -+ threads.each { |t| t.exit } -+ end - - # update clusters in PCSConfig - not_current_data = false -diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb -index 862b648..cc4c06e 100644 ---- a/pcsd/views/_resource.erb -+++ b/pcsd/views/_resource.erb -@@ -32,16 +32,16 @@ - - <%= erb :_resource_list %> - -- --
-- <% if @myView == "resource" %> -- {{resource-edit resource=Pcs.resourcesContainer.cur_resource page_name="Resource"}} -- <% else %> -- {{resource-edit resource=Pcs.resourcesContainer.cur_fence page_name="Fence device" stonith=1}} -- <% end %> --
-- -- -+ -+
-+ <% if @myView == "resource" %> -+ {{resource-edit resource=Pcs.resourcesContainer.cur_resource page_name="Resource" old_pcsd=Pcs.resourcesContainer.is_version_1}} -+ <% else %> -+ {{resource-edit resource=Pcs.resourcesContainer.cur_fence page_name="Fence device" stonith=1 old_pcsd=Pcs.resourcesContainer.is_version_1}} -+ <% end %> -+
-+ -+ - <% if @myView == "resource" %> - - -diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb -index 3c1e0cd..bb4e989 100644 ---- a/pcsd/views/main.erb -+++ b/pcsd/views/main.erb -@@ -197,6 +197,7 @@ - Current Location: - {{resource.nodes_running_on_string}} - -+ {{#unless old_pcsd}} - {{#unless resource.parent}} - - Clone: -@@ -226,8 +227,10 @@ - - {{/if}} - {{/unless}} -+ {{/unless}} - {{/if}} - {{/unless}} -+ {{#unless old_pcsd}} - {{#if resource.is_group}} - {{#unless resource.parent}} - -@@ -258,6 +261,7 @@ - - - {{/if}} -+ {{/unless}} - - {{#unless resource.stonith}} - {{location_constraints-table constraints=resource.location_constraints}} --- -1.9.1 - diff --git a/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch b/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch deleted file mode 100644 index f901308..0000000 --- a/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch +++ /dev/null @@ -1,143 +0,0 @@ -From f55ca2f12c4552fcd516737fa797cf806aa70705 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Thu, 3 Sep 2015 12:29:37 +0200 -Subject: [PATCH] fix loading cluster status for web UI - ---- - pcs/status.py | 37 ++++++++++++++++++++++++++++++++++--- - pcsd/cluster_entity.rb | 25 ++++++++++++++++++++++--- - pcsd/pcs.rb | 3 +++ - 3 files changed, 59 insertions(+), 6 deletions(-) - -diff --git a/pcs/status.py b/pcs/status.py -index eb2a5eb..34354ef 100644 ---- a/pcs/status.py -+++ b/pcs/status.py -@@ -123,14 +123,28 @@ def nodes_status(argv): - onlinenodes = [] - offlinenodes = [] - standbynodes = [] -+ remote_onlinenodes = [] -+ remote_offlinenodes = [] -+ remote_standbynodes = [] - for node in nodes[0].getElementsByTagName("node"): -+ node_name = node.getAttribute("name") -+ node_remote = node.getAttribute("type") == "remote" - if node.getAttribute("online") == "true": - if node.getAttribute("standby") == "true": -- standbynodes.append(node.getAttribute("name")) -+ if node_remote: -+ remote_standbynodes.append(node_name) -+ else: -+ standbynodes.append(node_name) - else: -- onlinenodes.append(node.getAttribute("name")) -+ if node_remote: -+ remote_onlinenodes.append(node_name) -+ else: -+ onlinenodes.append(node_name) - else: -- offlinenodes.append(node.getAttribute("name")) -+ if node_remote: -+ remote_offlinenodes.append(node_name) -+ else: -+ offlinenodes.append(node_name) - - print "Pacemaker Nodes:" - -@@ -149,6 +163,23 @@ def nodes_status(argv): - print node, - print "" - -+ print "Pacemaker Remote Nodes:" -+ -+ print " Online:", -+ for node in remote_onlinenodes: -+ print node, -+ print "" -+ -+ print " Standby:", -+ for node in remote_standbynodes: -+ print node, -+ print "" -+ -+ print " Offline:", -+ for node in remote_offlinenodes: -+ print node, -+ print "" -+ - # TODO: Remove, currently unused, we use status from the resource.py - def resources_status(argv): - info_dom = utils.getClusterState() -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 78bc5ab..4f751b8 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -533,7 +533,8 @@ module ClusterEntity - @operations = [] - failed_ops = [] - message_list = [] -- cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op") { |e| -+ cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op | "\ -+ + "//lrm_resource[starts-with(@id, \"#{@id}:\")]/lrm_rsc_op") { |e| - operation = ResourceOperation.new(e) - @operations << operation - if operation.rc_code != 0 -@@ -819,13 +820,15 @@ module ClusterEntity - primitive_list = @member.members - 
end - @masters, @slaves = get_masters_slaves(primitive_list) -- if @masters.empty? -- @error_list << { -+ if @masters.empty? and !disabled? -+ @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ @warning_list << { - :message => 'Resource is master/slave but has not been promoted '\ - + 'to master on any node.', - :type => 'no_master' - } - end -+ @status = @member.status if @status < @member.status - end - end - -@@ -851,6 +854,22 @@ module ClusterEntity - end - end - -+ def update_status -+ if @member -+ @member.update_status -+ if @member.instance_of?(Primitive) -+ primitive_list = [@member] -+ else -+ primitive_list = @member.members -+ end -+ @masters, @slaves = get_masters_slaves(primitive_list) -+ if @masters.empty? and !disabled? -+ @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ end -+ @status = @member.status if @status < @member.status -+ end -+ end -+ - private - def get_masters_slaves(primitive_list) - masters = [] -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index cc5b038..87404ac 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -568,6 +568,9 @@ def get_nodes_status() - if l.start_with?("Pacemaker Nodes:") - in_pacemaker = true - end -+ if l.start_with?("Pacemaker Remote Nodes:") -+ break -+ end - if l.end_with?(":") - next - end --- -1.9.1 - diff --git a/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch b/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch deleted file mode 100644 index 56bac08..0000000 --- a/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch +++ /dev/null @@ -1,1223 +0,0 @@ -From 9830bad113bf07fb65af18e2f2423c27da0180c0 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Tue, 8 Sep 2015 12:46:50 +0200 -Subject: [PATCH] web UI: multiple fixes in the dashboard - -- fix no quorum message -- fix status inconsistency of offline cluster -- fix status icons -- cluster status is 'failed' if there is resource with status 'blocked' -- fix random unselecting of current cluster -- performance improvements in loading cluster status -- removed icon that indicates issue in cluster -- changed status detection of resources ---- - pcsd/cluster_entity.rb | 150 +++++++++++++++-------- - pcsd/pcs.rb | 231 +++++++++++++++++------------------ - pcsd/public/js/nodes-ember.js | 122 +++++++++---------- - pcsd/public/js/pcsd.js | 24 +++- - pcsd/test/test_all_suite.rb | 1 + - pcsd/test/test_cluster_entity.rb | 126 +++++++++++++++---- - pcsd/test/test_pcs.rb | 257 +++++++++++++++++++++++++++++++++++++++ - pcsd/views/_cluster_list.erb | 6 +- - pcsd/views/main.erb | 2 +- - pcsd/views/manage.erb | 243 ++++++++++++++++++------------------ - 10 files changed, 779 insertions(+), 383 deletions(-) - create mode 100644 pcsd/test/test_pcs.rb - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 4f751b8..b5d2719 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -3,6 +3,34 @@ require 'pcs.rb' - - module ClusterEntity - -+ def self.get_rsc_status(crm_dom) -+ unless crm_dom -+ return {} -+ end -+ status = {} -+ crm_dom.elements.each('/crm_mon/resources//resource') { |e| -+ rsc_id = e.attributes['id'].split(':')[0] -+ status[rsc_id] ||= [] -+ status[rsc_id] << ClusterEntity::CRMResourceStatus.new(e) -+ } -+ return status -+ end -+ -+ def self.get_resources_operations(cib_dom) -+ unless cib_dom -+ return {} -+ end -+ operations = {} -+ cib_dom.elements.each( -+ '/cib/status/node_state/lrm/lrm_resources/lrm_resource/lrm_rsc_op' -+ ) { |e| -+ rsc_id = e.parent.attributes['id'].split(':')[0] -+ 
operations[rsc_id] ||= [] -+ operations[rsc_id] << ClusterEntity::ResourceOperation.new(e) -+ } -+ return operations -+ end -+ - def self.obj_to_hash(obj, variables=nil) - unless variables - variables = obj.instance_variables -@@ -454,8 +482,9 @@ module ClusterEntity - attr_accessor :agentname, :_class, :provider, :type, :stonith, - :instance_attr, :crm_status, :operations - -- def initialize(primitive_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(primitive_cib_element, crm_dom, parent) -+ def initialize(primitive_cib_element=nil, rsc_status=nil, parent=nil, -+ operations=nil) -+ super(primitive_cib_element, nil, parent) - @class_type = 'primitive' - @agentname = nil - @_class = nil -@@ -482,18 +511,12 @@ module ClusterEntity - ) - } - @stonith = @_class == 'stonith' -- if @id and crm_dom -- crm_dom.elements.each("//resource[starts-with(@id, \"#{@id}:\")] | "\ -- + "//resource[@id=\"#{@id}\"]") { |e| -- @crm_status << CRMResourceStatus.new(e) -- } -+ if @id and rsc_status -+ @crm_status = rsc_status[@id] || [] - end - - @status = get_status -- -- if cib_dom -- load_operations(cib_dom) -- end -+ load_operations(operations) - end - end - -@@ -525,28 +548,26 @@ module ClusterEntity - return status - end - -- def load_operations(cib_dom) -- unless @id -+ def load_operations(operations) -+ @operations = [] -+ unless operations and @id and operations[@id] - return - end - -- @operations = [] - failed_ops = [] - message_list = [] -- cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op | "\ -- + "//lrm_resource[starts-with(@id, \"#{@id}:\")]/lrm_rsc_op") { |e| -- operation = ResourceOperation.new(e) -- @operations << operation -- if operation.rc_code != 0 -+ operations[@id].each { |o| -+ @operations << o -+ if o.rc_code != 0 - # 7 == OCF_NOT_RUNNING == The resource is safely stopped. -- next if operation.operation == 'monitor' and operation.rc_code == 7 -+ next if o.operation == 'monitor' and o.rc_code == 7 - # 8 == OCF_RUNNING_MASTER == The resource is running in master mode. 
-- next if 8 == operation.rc_code -- failed_ops << operation -- message = "Failed to #{operation.operation} #{@id}" -- message += " on #{Time.at(operation.last_rc_change).asctime}" -- message += " on node #{operation.on_node}" if operation.on_node -- message += ": #{operation.exit_reason}" if operation.exit_reason -+ next if 8 == o.rc_code -+ failed_ops << o -+ message = "Failed to #{o.operation} #{@id}" -+ message += " on #{Time.at(o.last_rc_change).asctime}" -+ message += " on node #{o.on_node}" if o.on_node -+ message += ": #{o.exit_reason}" if o.exit_reason - message_list << { - :message => message - } -@@ -652,26 +673,48 @@ module ClusterEntity - class Group < Resource - attr_accessor :members - -- def initialize(group_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(group_cib_element, crm_dom, parent) -+ def initialize( -+ group_cib_element=nil, rsc_status=nil, parent=nil, operations=nil -+ ) -+ super(group_cib_element, nil, parent) - @class_type = 'group' - @members = [] - if group_cib_element and group_cib_element.name == 'group' - @status = ClusterEntity::ResourceStatus.new(:running) - group_cib_element.elements.each('primitive') { |e| -- p = Primitive.new(e, crm_dom, self, cib_dom) -+ p = Primitive.new(e, rsc_status, self, operations) - members << p -- @status = p.status if @status < p.status - } -+ update_status - end - end - - def update_status - @status = ClusterEntity::ResourceStatus.new(:running) -+ first = true - @members.each { |p| - p.update_status -- @status = p.status if @status < p.status -+ if first -+ first = false -+ next -+ end -+ if ( -+ p.status == ClusterEntity::ResourceStatus.new(:disabled) or -+ p.status == ClusterEntity::ResourceStatus.new(:blocked) or -+ p.status == ClusterEntity::ResourceStatus.new(:failed) -+ ) -+ @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ end - } -+ if (@members and @members.length > 0 and -+ (ClusterEntity::ResourceStatus.new(:running) != @members[0].status and -+ ClusterEntity::ResourceStatus.new(:unknown) != @members[0].status) -+ ) -+ @status = @members[0].status -+ end -+ if disabled? 
-+ @status = ClusterEntity::ResourceStatus.new(:disabled) -+ end - end - - def to_status(version='1') -@@ -713,8 +756,9 @@ module ClusterEntity - class MultiInstance < Resource - attr_accessor :member, :unique, :managed, :failed, :failure_ignored - -- def initialize(resource_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(resource_cib_element, crm_dom, parent) -+ def initialize(resource_cib_element=nil, crm_dom=nil, rsc_status=nil, -+ parent=nil, operations=nil) -+ super(resource_cib_element, nil, parent) - @member = nil - @multi_state = false - @unique = false -@@ -730,15 +774,13 @@ module ClusterEntity - ) - member = resource_cib_element.elements['group | primitive'] - if member and member.name == 'group' -- @member = Group.new(member, crm_dom, self, cib_dom) -+ @member = Group.new(member, rsc_status, self, operations) - elsif member and member.name == 'primitive' -- @member = Primitive.new(member, crm_dom, self, cib_dom) -- end -- if @member -- @status = @member.status -+ @member = Primitive.new(member, rsc_status, self, operations) - end -+ update_status - if crm_dom -- status = crm_dom.elements["//clone[@id='#{@id}']"] -+ status = crm_dom.elements["/crm_mon/resources//clone[@id='#{@id}']"] - if status - @unique = status.attributes['unique'] == 'true' - @managed = status.attributes['managed'] == 'true' -@@ -754,6 +796,9 @@ module ClusterEntity - @member.update_status - @status = @member.status - end -+ if disabled? -+ @status = ClusterEntity::ResourceStatus.new(:disabled) -+ end - end - - def to_status(version='1') -@@ -776,8 +821,11 @@ module ClusterEntity - - class Clone < MultiInstance - -- def initialize(resource_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(resource_cib_element, crm_dom, parent, cib_dom) -+ def initialize( -+ resource_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, -+ operations=nil -+ ) -+ super(resource_cib_element, crm_dom, rsc_status, parent, operations) - @class_type = 'clone' - end - -@@ -808,11 +856,12 @@ module ClusterEntity - class MasterSlave < MultiInstance - attr_accessor :masters, :slaves - -- def initialize(master_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) -- super(master_cib_element, crm_dom, parent, cib_dom) -+ def initialize(master_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, operations=nil) -+ super(master_cib_element, crm_dom, rsc_status, parent, operations) - @class_type = 'master' - @masters = [] - @slaves = [] -+ update_status - if @member - if @member.instance_of?(Primitive) - primitive_list = [@member] -@@ -820,15 +869,15 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if @masters.empty? and !disabled? -- @status = ClusterEntity::ResourceStatus.new(:partially_running) -+ if (@masters.empty? and -+ @status != ClusterEntity::ResourceStatus.new(:disabled) -+ ) - @warning_list << { - :message => 'Resource is master/slave but has not been promoted '\ - + 'to master on any node.', - :type => 'no_master' - } - end -- @status = @member.status if @status < @member.status - end - end - -@@ -857,16 +906,21 @@ module ClusterEntity - def update_status - if @member - @member.update_status -+ @status = @member.status - if @member.instance_of?(Primitive) - primitive_list = [@member] - else - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if @masters.empty? and !disabled? -+ if (@masters.empty? 
and -+ @member.status != ClusterEntity::ResourceStatus.new(:disabled) -+ ) - @status = ClusterEntity::ResourceStatus.new(:partially_running) - end -- @status = @member.status if @status < @member.status -+ end -+ if disabled? -+ @status = ClusterEntity::ResourceStatus.new(:disabled) - end - end - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 87404ac..9a0d145 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -15,14 +15,14 @@ require 'resource.rb' - require 'cluster_entity.rb' - require 'auth.rb' - --def getAllSettings(session) -- stdout, stderr, retval = run_cmd(session, PCS, "property") -- stdout.map(&:chomp!) -- stdout.map(&:strip!) -+def getAllSettings(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ end - stdout2, stderr2, retval2 = run_cmd(session, PENGINE, "metadata") - metadata = stdout2.join - ret = {} -- if retval == 0 and retval2 == 0 -+ if cib_dom and retval2 == 0 - doc = REXML::Document.new(metadata) - - default = "" -@@ -37,8 +37,9 @@ def getAllSettings(session) - ret[name] = {"value" => default, "type" => el_type} - } - -- stdout.each {|line| -- key,val = line.split(': ', 2) -+ cib_dom.elements.each('/cib/configuration/crm_config//nvpair') { |e| -+ key = e.attributes['name'] -+ val = e.attributes['value'] - key.gsub!(/-/,"_") - if ret.has_key?(key) - if ret[key]["type"] == "boolean" -@@ -723,106 +724,92 @@ def get_cluster_name() - end - end - --def get_node_attributes(session) -- stdout, stderr, retval = run_cmd(session, PCS, "property", "list") -- if retval != 0 -- return {} -- end -- -- attrs = {} -- found = false -- stdout.each { |line| -- if not found -- if line.strip.start_with?("Node Attributes:") -- found = true -- end -- next -- end -- if not line.start_with?(" ") -- break -- end -- sline = line.split(":", 2) -- nodename = sline[0].strip -- attrs[nodename] = [] -- sline[1].strip.split(" ").each { |attr| -- key, val = attr.split("=", 2) -- attrs[nodename] << {:key => key, :value => val} -+def get_node_attributes(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ return {} unless cib_dom -+ end -+ node_attrs = {} -+ cib_dom.elements.each( -+ '/cib/configuration/nodes/node/instance_attributes/nvpair' -+ ) { |e| -+ node = e.parent.parent.attributes['uname'] -+ node_attrs[node] ||= [] -+ node_attrs[node] << { -+ :id => e.attributes['id'], -+ :key => e.attributes['name'], -+ :value => e.attributes['value'] - } - } -- return attrs -+ node_attrs.each { |_, val| val.sort_by! { |obj| obj[:key] }} -+ return node_attrs - end - --def get_fence_levels(session) -- stdout, stderr, retval = run_cmd(session, PCS, "stonith", "level") -- if retval != 0 or stdout == "" -- return {} -+def get_fence_levels(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ return {} unless cib_dom - end - - fence_levels = {} -- node = "" -- stdout.each {|line| -- if line.start_with?(" Node: ") -- node = line.split(":",2)[1].strip -- next -- end -- fence_levels[node] ||= [] -- md = / Level (\S+) - (.*)$/.match(line) -- fence_levels[node] << {"level" => md[1], "devices" => md[2]} -+ cib_dom.elements.each( -+ '/cib/configuration/fencing-topology/fencing-level' -+ ) { |e| -+ target = e.attributes['target'] -+ fence_levels[target] ||= [] -+ fence_levels[target] << { -+ 'level' => e.attributes['index'], -+ 'devices' => e.attributes['devices'] -+ } - } -+ fence_levels.each { |_, val| val.sort_by! 
{ |obj| obj['level'].to_i }} - return fence_levels - end - --def get_acls(session) -- stdout, stderr, retval = run_cmd(session, PCS, "acl", "show") -- if retval != 0 or stdout == "" -- return {} -+def get_acls(session, cib_dom=nil) -+ unless cib_dom -+ cib_dom = get_cib_dom(session) -+ return {} unless cib_dom - end - -- ret_val = {} -- state = nil -- user = "" -- role = "" -- -- stdout.each do |line| -- if m = /^User: (.*)$/.match(line) -- user = m[1] -- state = "user" -- ret_val[state] ||= {} -- ret_val[state][user] ||= [] -- next -- elsif m = /^Group: (.*)$/.match(line) -- user = m[1] -- state = "group" -- ret_val[state] ||= {} -- ret_val[state][user] ||= [] -- next -- elsif m = /^Role: (.*)$/.match(line) -- role = m[1] -- state = "role" -- ret_val[state] ||= {} -- ret_val[state][role] ||= {} -- next -- end -+ acls = { -+ 'role' => {}, -+ 'group' => {}, -+ 'user' => {}, -+ 'target' => {} -+ } - -- case state -- when "user", "group" -- m = /^ Roles: (.*)$/.match(line) -- ret_val[state][user] ||= [] -- m[1].scan(/\S+/).each {|urole| -- ret_val[state][user] << urole -+ cib_dom.elements.each('/cib/configuration/acls/*') { |e| -+ type = e.name[4..-1] -+ if e.name == 'acl_role' -+ role_id = e.attributes['id'] -+ desc = e.attributes['description'] -+ acls[type][role_id] = {} -+ acls[type][role_id]['description'] = desc ? desc : '' -+ acls[type][role_id]['permissions'] = [] -+ e.elements.each('acl_permission') { |p| -+ p_id = p.attributes['id'] -+ p_kind = p.attributes['kind'] -+ val = '' -+ if p.attributes['xpath'] -+ val = "xpath #{p.attributes['xpath']}" -+ elsif p.attributes['reference'] -+ val = "id #{p.attributes['reference']}" -+ else -+ next -+ end -+ acls[type][role_id]['permissions'] << "#{p_kind} #{val} (#{p_id})" -+ } -+ elsif ['acl_target', 'acl_group'].include?(e.name) -+ id = e.attributes['id'] -+ acls[type][id] = [] -+ e.elements.each('role') { |r| -+ acls[type][id] << r.attributes['id'] - } -- when "role" -- ret_val[state][role] ||= {} -- ret_val[state][role]["permissions"] ||= [] -- ret_val[state][role]["description"] ||= "" -- if m = /^ Description: (.*)$/.match(line) -- ret_val[state][role]["description"] = m[1] -- elsif m = /^ Permission: (.*)$/.match(line) -- ret_val[state][role]["permissions"] << m[1] -- end - end -- end -- return ret_val -+ } -+ acls['user'] = acls['target'] -+ return acls - end - - def enable_cluster(session) -@@ -1438,7 +1425,7 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - {:version=>'2', :operations=>'1'}, - true, - nil, -- 6 -+ 15 - ) - node_map[node] = {} - node_map[node].update(overview) -@@ -1601,10 +1588,10 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - } - if status[:status] != 'error' - status[:resource_list].each { |resource| -- if resource[:status] == 'failed' -+ if ['failed', 'blocked'].include?(resource[:status]) - status[:status] = 'error' - break -- elsif ['blocked', 'partially running'].include?(resource[:status]) -+ elsif ['partially running'].include?(resource[:status]) - status[:status] = 'warning' - end - } -@@ -1634,10 +1621,11 @@ def get_node_status(session, cib_dom) - :cluster_settings => {}, - :need_ring1_address => need_ring1_address?, - :is_cman_with_udpu_transport => is_cman_with_udpu_transport?, -- :acls => get_acls(session), -+ :acls => get_acls(session, cib_dom), - :username => session[:username], -- :fence_levels => get_fence_levels(session), -- :node_attr => node_attrs_to_v2(get_node_attributes(session)) -+ :fence_levels => get_fence_levels(session, cib_dom), -+ 
:node_attr => node_attrs_to_v2(get_node_attributes(session, cib_dom)), -+ :known_nodes => [] - } - - nodes = get_nodes_status() -@@ -1654,10 +1642,10 @@ def get_node_status(session, cib_dom) - - if cib_dom - node_status[:groups] = get_resource_groups(cib_dom) -- node_status[:constraints] = getAllConstraints(cib_dom.elements['//constraints']) -+ node_status[:constraints] = getAllConstraints(cib_dom.elements['/cib/configuration/constraints']) - end - -- cluster_settings = getAllSettings(session) -+ cluster_settings = getAllSettings(session, cib_dom) - if not cluster_settings.has_key?('error') - node_status[:cluster_settings] = cluster_settings - end -@@ -1670,7 +1658,7 @@ def get_resource_groups(cib_dom) - return [] - end - group_list = [] -- cib_dom.elements.each('cib/configuration/resources//group') do |e| -+ cib_dom.elements.each('/cib/configuration/resources//group') do |e| - group_list << e.attributes['id'] - end - return group_list -@@ -1682,49 +1670,54 @@ def get_resources(cib_dom, crm_dom=nil, get_operations=false) - end - - resource_list = [] -- cib = (get_operations) ? cib_dom : nil -+ operations = (get_operations) ? ClusterEntity::get_resources_operations(cib_dom) : nil -+ rsc_status = ClusterEntity::get_rsc_status(crm_dom) - -- cib_dom.elements.each('cib/configuration/resources/primitive') do |e| -- resource_list << ClusterEntity::Primitive.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/primitive') do |e| -+ resource_list << ClusterEntity::Primitive.new(e, rsc_status, nil, operations) - end -- cib_dom.elements.each('cib/configuration/resources/group') do |e| -- resource_list << ClusterEntity::Group.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/group') do |e| -+ resource_list << ClusterEntity::Group.new(e, rsc_status, nil, operations) - end -- cib_dom.elements.each('cib/configuration/resources/clone') do |e| -- resource_list << ClusterEntity::Clone.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/clone') do |e| -+ resource_list << ClusterEntity::Clone.new( -+ e, crm_dom, rsc_status, nil, operations -+ ) - end -- cib_dom.elements.each('cib/configuration/resources/master') do |e| -- resource_list << ClusterEntity::MasterSlave.new(e, crm_dom, nil, cib) -+ cib_dom.elements.each('/cib/configuration/resources/master') do |e| -+ resource_list << ClusterEntity::MasterSlave.new( -+ e, crm_dom, rsc_status, nil, operations -+ ) - end - return resource_list - end - --def get_resource_by_id(id, cib_dom, crm_dom=nil, get_operations=false) -+def get_resource_by_id(id, cib_dom, crm_dom=nil, rsc_status=nil, operations=false) - unless cib_dom - return nil - end - -- e = cib_dom.elements["cib/configuration/resources//*[@id='#{id}']"] -+ e = cib_dom.elements["/cib/configuration/resources//*[@id='#{id}']"] - unless e - return nil - end - - if e.parent.name != 'resources' # if resource is in group, clone or master/slave -- p = get_resource_by_id(e.parent.attributes['id'], cib_dom, crm_dom, get_operations) -+ p = get_resource_by_id( -+ e.parent.attributes['id'], cib_dom, crm_dom, rsc_status, operations -+ ) - return p.get_map[id.to_sym] - end - -- cib = (get_operations) ? 
cib_dom : nil -- - case e.name - when 'primitive' -- return ClusterEntity::Primitive.new(e, crm_dom, nil, cib) -+ return ClusterEntity::Primitive.new(e, rsc_status, nil, operations) - when 'group' -- return ClusterEntity::Group.new(e, crm_dom, nil, cib) -+ return ClusterEntity::Group.new(e, rsc_status, nil, operations) - when 'clone' -- return ClusterEntity::Clone.new(e, crm_dom, nil, cib) -+ return ClusterEntity::Clone.new(e, crm_dom, rsc_status, nil, operations) - when 'master' -- return ClusterEntity::MasterSlave.new(e, crm_dom, nil, cib) -+ return ClusterEntity::MasterSlave.new(e, crm_dom, rsc_status, nil, operations) - else - return nil - end -@@ -1762,7 +1755,7 @@ def node_attrs_to_v2(node_attrs) - all_nodes_attr[node] = [] - attrs.each { |attr| - all_nodes_attr[node] << { -- :id => nil, -+ :id => attr[:id], - :name => attr[:key], - :value => attr[:value] - } -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 5fec386..bbeed55 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -75,9 +75,9 @@ Pcs = Ember.Application.createWithMixins({ - timeout: 20000, - success: function(data) { - Pcs.clusterController.update(data); -- Ember.run.next(function() { -- correct_visibility_dashboard(Pcs.clusterController.cur_cluster); -- }); -+ if (Pcs.clusterController.get('cur_cluster')) { -+ Pcs.clusterController.update_cur_cluster(Pcs.clusterController.get('cur_cluster').get('name')); -+ } - if (data["not_current_data"]) { - self.update(); - } -@@ -595,30 +595,20 @@ Pcs.ResourceObj = Ember.Object.extend({ - }.property("class_type"), - res_type: Ember.computed.alias('resource_type'), - status_icon: function() { -- var icon_class; -- switch (this.get('status')) { -- case "running": -- icon_class = "check"; -- break; -- case "disabled": -- case "partially running": -- icon_class = "warning"; -- break; -- case "failed": -- case "blocked": -- icon_class = "error"; -- break; -- default: -- icon_class = "x"; -- } -+ var icon_class = get_status_icon_class(this.get("status_val")); - return "
"; - }.property("status_val"), - status_val: function() { -- if (this.get('warning_list').length) -- return get_status_value("warning"); -+ var status_val = get_status_value(this.get('status')); -+ if (this.get('warning_list').length && status_val != get_status_value('disabled')) -+ status_val = get_status_value("warning"); - if (this.get('error_list').length) -- return get_status_value("error"); -- return get_status_value(this.status); -+ status_val = get_status_value("error"); -+ if ((get_status_value(this.get('status')) - status_val) < 0) { -+ return get_status_value(this.get('status')); -+ } else { -+ return status_val; -+ } - }.property('status', 'error_list.@each.message', 'warning_list.@each.message'), - status_color: function() { - return get_status_color(this.get("status_val")); -@@ -996,12 +986,17 @@ Pcs.Clusternode = Ember.Object.extend({ - return this.get('status') == "unknown"; - }.property("status"), - status_val: function() { -- if (this.warnings && this.warnings.length) -- return get_status_value("warning"); -- if (this.errors && this.errors.length) -- return get_status_value("error"); -- return get_status_value(this.status); -- }.property("status"), -+ var status_val = get_status_value(this.get('status')); -+ if (this.get('warning_list').length) -+ status_val = get_status_value("warning"); -+ if (this.get('error_list').length) -+ status_val = get_status_value("error"); -+ if ((get_status_value(this.get('status')) - status_val) < 0) { -+ return get_status_value(this.get('status')); -+ } else { -+ return status_val; -+ } -+ }.property('status', 'error_list.@each.message', 'warning_list.@each.message'), - status_style: function() { - var color = get_status_color(this.get("status_val")); - return "color: " + color + ((color != "green")? "; font-weight: bold;" : ""); -@@ -1011,8 +1006,8 @@ Pcs.Clusternode = Ember.Object.extend({ - return ((this.get("status_val") == get_status_value("ok") || this.status == "standby") ? show + "default-hidden" : ""); - }.property("status_val"), - status_icon: function() { -- var icon_class = {"-1": "x", 1: "error", 2: "warning", 3: "x", 4: "check"}; -- return "
"; -+ var icon_class = get_status_icon_class(this.get("status_val")); -+ return "
"; - }.property("status_val"), - error_list: [], - warning_list: [], -@@ -1158,18 +1153,18 @@ Pcs.Cluster = Ember.Object.extend({ - return out; - }.property("error_list"), - status_icon: function() { -- var icon_class = {"-1": "x", 1: "error", 2: "warning", 3: "x", 4: "check"}; -- return "
"; -+ var icon_class = get_status_icon_class(get_status_value(this.get('status'))); -+ return "
"; - }.property("status"), - quorum_show: function() { -- if (this.status == "unknown") { -+ if (this.get('status') == "unknown") { - return "(quorate unknown)" -- } else if (!this.quorate) { -+ } else if (!this.get('quorate')) { - return "(doesn't have quorum)" - } else { - return "" - } -- }.property("status", "quorum"), -+ }.property("status", "quorate"), - nodes: [], - nodes_failed: 0, - resource_list: [], -@@ -1270,7 +1265,7 @@ Pcs.Cluster = Ember.Object.extend({ - - Pcs.clusterController = Ember.Object.create({ - cluster_list: Ember.ArrayController.create({ -- content: Ember.A(), sortProperties: ['status'], -+ content: Ember.A(), sortProperties: ['status', 'name'], - sortAscending: true, - sortFunction: function(a,b){return status_comparator(a,b);} - }), -@@ -1283,26 +1278,25 @@ Pcs.clusterController = Ember.Object.create({ - num_warning: 0, - num_unknown: 0, - -- update_cur_cluster: function(row) { -+ update_cur_cluster: function(cluster_name) { - var self = this; -- var cluster_name = $(row).attr("nodeID"); -- $("#clusters_list").find("div.arrow").hide(); -- $(row).find("div.arrow").show(); -+ $("#clusters_list div.arrow").hide(); -+ var selected_cluster = null; - - $.each(self.get('cluster_list').get('content'), function(key, cluster) { - if (cluster.get("name") == cluster_name) { -- self.set('cur_cluster', cluster); -+ selected_cluster = cluster; - return false; - } - }); -- correct_visibility_dashboard(self.get('cur_cluster')); - -- $("#node_sub_info").children().each(function (i, val) { -- if ($(val).attr("id") == ("cluster_info_" + cluster_name)) -- $(val).show(); -- else -- $(val).hide(); -- }); -+ self.set('cur_cluster', selected_cluster); -+ if (selected_cluster) { -+ Ember.run.next(function() { -+ $("#clusters_list tr[nodeID=" + cluster_name + "] div.arrow").show(); -+ correct_visibility_dashboard(self.get('cur_cluster')); -+ }); -+ } - }, - - update: function(data) { -@@ -1355,21 +1349,6 @@ Pcs.clusterController = Ember.Object.create({ - }); - } - -- switch (cluster.get('status')) { -- case "ok": -- self.incrementProperty('num_ok'); -- break; -- case "error": -- self.incrementProperty('num_error'); -- break; -- case "warning": -- self.incrementProperty('num_warning'); -- break; -- default: -- self.incrementProperty('num_unknown'); -- break; -- } -- - var nodes_to_auth = []; - $.each(cluster.get('warning_list'), function(key, val){ - if (val.hasOwnProperty("type") && val.type == "nodes_not_authorized"){ -@@ -1398,6 +1377,21 @@ Pcs.clusterController = Ember.Object.create({ - - cluster.set("status", "unknown"); - } -+ -+ switch (get_status_value(cluster.get('status'))) { -+ case get_status_value("ok"): -+ self.incrementProperty('num_ok'); -+ break; -+ case get_status_value("error"): -+ self.incrementProperty('num_error'); -+ break; -+ case get_status_value("warning"): -+ self.incrementProperty('num_warning'); -+ break; -+ default: -+ self.incrementProperty('num_unknown'); -+ break; -+ } - }); - - var to_remove = []; -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index e4830a9..cddf14e 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -1850,10 +1850,10 @@ function get_status_value(status) { - standby: 2, - "partially running": 2, - disabled: 3, -- unknown: 3, -- ok: 4, -- running: 4, -- online: 4 -+ unknown: 4, -+ ok: 5, -+ running: 5, -+ online: 5 - }; - return ((values.hasOwnProperty(status)) ? 
values[status] : -1); - } -@@ -1866,11 +1866,25 @@ function status_comparator(a,b) { - return valA - valB; - } - -+function get_status_icon_class(status_val) { -+ switch (status_val) { -+ case get_status_value("error"): -+ return "error"; -+ case get_status_value("disabled"): -+ case get_status_value("warning"): -+ return "warning"; -+ case get_status_value("ok"): -+ return "check"; -+ default: -+ return "x"; -+ } -+} -+ - function get_status_color(status_val) { - if (status_val == get_status_value("ok")) { - return "green"; - } -- else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown")) { -+ else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown") || status_val == get_status_value('disabled')) { - return "orange"; - } - return "red"; -diff --git a/pcsd/views/_cluster_list.erb b/pcsd/views/_cluster_list.erb -index 9d719e0..90f084e 100644 ---- a/pcsd/views/_cluster_list.erb -+++ b/pcsd/views/_cluster_list.erb -@@ -22,7 +22,7 @@ - {{/if}} - - {{#each Pcs.clusterController.cluster_list }} -- -+ - - - -@@ -42,7 +42,7 @@ - {{else}} - {{nodes.length}} - {{#if nodes_failed}} -- |
{{nodes_failed}} -+ | {{nodes_failed}} - {{/if}} - {{/if}} - -@@ -52,7 +52,7 @@ - {{else}} - {{resource_list.length}} - {{#if resources_failed}} -- | {{resources_failed}} -+ | {{resources_failed}} - {{/if}} - {{/if}} -
-diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb -index bb4e989..b24c74a 100644 ---- a/pcsd/views/main.erb -+++ b/pcsd/views/main.erb -@@ -151,7 +151,7 @@ - - - -- -+ {{{resource.status_icon}}} - - {{{resource.show_status}}} -
-diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb -index 79a8637..3620779 100644 ---- a/pcsd/views/manage.erb -+++ b/pcsd/views/manage.erb -@@ -42,131 +42,132 @@ - INFORMATION ABOUT CLUSTERS - - -- Select a cluster to view more detailed cluster information -- {{#each Pcs.clusterController.cluster_list}} -- -+ {{else}} -+ Select a cluster to view more detailed cluster information -+ {{/if}} -
- - --- -1.9.1 - diff --git a/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch b/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch deleted file mode 100644 index 3389bd3..0000000 --- a/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 1307ccbf977dd4ca797a82312631afae03530fbb Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 8 Sep 2015 09:19:10 +0200 -Subject: [PATCH] fixed a typo in an error message - ---- - pcsd/remote.rb | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 06947ec..8a71000 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -2127,7 +2127,7 @@ def fix_auth_of_cluster(params, request, session) - tokens_data, true - ) - if retval == 404 -- return [400, "Old version of PCS/PCSD is runnig on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."] -+ return [400, "Old version of PCS/PCSD is running on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."] - elsif retval != 200 - return [400, "Authentication failed."] - end --- -1.9.1 - diff --git a/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch b/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch deleted file mode 100644 index 06f1040..0000000 --- a/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 0b12b5e6212b42a3128d30dbce9371ac361dd865 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 15 Sep 2015 16:30:23 +0200 -Subject: [PATCH] fix authentication in web UI - ---- - pcsd/public/js/pcsd.js | 10 ++++---- - pcsd/remote.rb | 62 +++++++++++++++++++++++++++++++------------------- - 2 files changed, 45 insertions(+), 27 deletions(-) - -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 197cdd1..e4830a9 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -719,7 +719,7 @@ function auth_nodes(dialog) { - $("#auth_failed_error_msg").hide(); - $.ajax({ - type: 'POST', -- url: '/remote/auth_nodes', -+ url: '/remote/auth_gui_against_nodes', - data: dialog.find("#auth_nodes_form").serialize(), - timeout: pcs_timeout, - success: function (data) { -@@ -735,9 +735,11 @@ function auth_nodes(dialog) { - function auth_nodes_dialog_update(dialog_obj, data) { - var unauth_nodes = []; - var node; -- for (node in data) { -- if (data[node] != 0) { -- unauth_nodes.push(node); -+ if (data['node_auth_error']) { -+ for (node in data['node_auth_error']) { -+ if (data['node_auth_error'][node] != 0) { -+ unauth_nodes.push(node); -+ } - } - } - -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 8a71000..e65c8ac 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -60,7 +60,7 @@ def remote(params, request, session) - :cluster_destroy => method(:cluster_destroy), - :get_wizard => method(:get_wizard), - :wizard_submit => method(:wizard_submit), -- :auth_nodes => method(:auth_nodes), -+ :auth_gui_against_nodes => method(:auth_gui_against_nodes), - :get_tokens => method(:get_tokens), - :get_cluster_tokens => method(:get_cluster_tokens), - :save_tokens => method(:save_tokens), -@@ -1994,32 +1994,48 @@ def wizard_submit(params, request, session) - - end - --def auth_nodes(params, request, session) -- retval = {} -- params.each{|node| -- if node[0].end_with?"-pass" and node[0].length > 5 -- nodename = node[0][0..-6] -- if params.has_key?("all") -- pass = params["pass-all"] -- else -- pass = node[1] -- end -- 
result, sync_successful, _, _ = pcs_auth( -- session, [nodename], SUPERUSER, pass, true, true -- ) -- if not sync_successful -- retval[nodename] = 1 -- else -- node_status = result[nodename]['status'] -- if 'ok' == node_status or 'already_authorized' == node_status -- retval[nodename] = 0 -+def auth_gui_against_nodes(params, request, session) -+ node_auth_error = {} -+ new_tokens = {} -+ threads = [] -+ params.each { |node| -+ threads << Thread.new { -+ if node[0].end_with?("-pass") and node[0].length > 5 -+ nodename = node[0][0..-6] -+ if params.has_key?("all") -+ pass = params["pass-all"] - else -- retval[nodename] = 1 -+ pass = node[1] -+ end -+ data = { -+ 'node-0' => nodename, -+ 'username' => SUPERUSER, -+ 'password' => pass, -+ 'force' => 1, -+ } -+ node_auth_error[nodename] = 1 -+ code, response = send_request(session, nodename, 'auth', true, data) -+ if 200 == code -+ token = response.strip -+ if not token.empty? -+ new_tokens[nodename] = token -+ node_auth_error[nodename] = 0 -+ end - end - end -- end -+ } - } -- return [200, JSON.generate(retval)] -+ threads.each { |t| t.join } -+ -+ if not new_tokens.empty? -+ cluster_nodes = get_corosync_nodes() -+ tokens_cfg = Cfgsync::PcsdTokens.from_file('') -+ sync_successful, sync_responses = Cfgsync::save_sync_new_tokens( -+ tokens_cfg, new_tokens, cluster_nodes, $cluster_name -+ ) -+ end -+ -+ return [200, JSON.generate({'node_auth_error' => node_auth_error})] - end - - # not used anymore, left here for backward compatability reasons --- -1.9.1 - diff --git a/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch b/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch deleted file mode 100644 index 0d9637f..0000000 --- a/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch +++ /dev/null @@ -1,130 +0,0 @@ -From 5c62afc314bfbff55e36c0f7f8e9aec0cc9246c4 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Wed, 2 Sep 2015 14:04:55 +0200 -Subject: [PATCH] web UI: mark unsaved permissions forms - ---- - pcsd/public/js/pcsd.js | 36 ++++++++++++++++++++++++++++++++++++ - pcsd/views/_permissions_cluster.erb | 5 ++++- - pcsd/views/permissions.erb | 8 +++++++- - 3 files changed, 47 insertions(+), 2 deletions(-) - -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 2c71e6b..879b533 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -2205,6 +2205,7 @@ function permissions_load_cluster(cluster_name, callback) { - $("#" + element_id + " :checkbox").each(function(key, checkbox) { - permissions_fix_dependent_checkboxes(checkbox); - }); -+ permissions_cluster_dirty_flag(cluster_name, false); - if (callback) { - callback(); - } -@@ -2259,8 +2260,27 @@ function permissions_save_cluster(form) { - }); - } - -+function permissions_cluster_dirty_flag(cluster_name, flag) { -+ var cluster_row = permissions_get_cluster_row(cluster_name); -+ if (cluster_row) { -+ var dirty_elem = cluster_row.find("span[class=unsaved_changes]"); -+ if (dirty_elem) { -+ if (flag) { -+ dirty_elem.show(); -+ } -+ else { -+ dirty_elem.hide(); -+ } -+ } -+ } -+} -+ - function permission_remove_row(button) { -+ var cluster_name = permissions_get_clustername( -+ $(button).parents("form").first() -+ ); - $(button).parent().parent().remove(); -+ permissions_cluster_dirty_flag(cluster_name, true); - } - - function permissions_add_row(template_row) { -@@ -2268,6 +2288,9 @@ function permissions_add_row(template_row) { - var user_type = permissions_get_row_type(template_row); - var max_key = -1; - var exists = false; -+ var 
cluster_name = permissions_get_clustername( -+ $(template_row).parents("form").first() -+ ); - - if("" == user_name) { - alert("Please enter the name"); -@@ -2326,6 +2349,8 @@ function permissions_add_row(template_row) { - template_inputs.removeAttr("checked").removeAttr("selected"); - template_inputs.removeAttr("disabled").removeAttr("readonly"); - $(template_row).find(":input[type=text]").val(""); -+ -+ permissions_cluster_dirty_flag(cluster_name, true); - } - - function permissions_get_dependent_checkboxes(checkbox) { -@@ -2400,3 +2425,14 @@ function permissions_get_checkbox_permission(checkbox) { - return ""; - } - -+function permissions_get_cluster_row(cluster_name) { -+ var cluster_row = null; -+ $('#cluster_list td[class=node_name]').each(function(index, elem) { -+ var jq_elem = $(elem); -+ if (jq_elem.text().trim() == cluster_name.trim()) { -+ cluster_row = jq_elem.parents("tr").first(); -+ } -+ }); -+ return cluster_row; -+} -+ -diff --git a/pcsd/views/_permissions_cluster.erb b/pcsd/views/_permissions_cluster.erb -index 232a5de..4048366 100644 ---- a/pcsd/views/_permissions_cluster.erb -+++ b/pcsd/views/_permissions_cluster.erb -@@ -58,7 +58,10 @@ - <% if user['allow'].include?(perm['code']) %> - checked="checked" - <% end %> -- onchange="permissions_fix_dependent_checkboxes(this);" -+ onchange=" -+ permissions_fix_dependent_checkboxes(this); -+ permissions_cluster_dirty_flag('<%= h(@cluster_name) %>', true); -+ " - > - - <% } %> -diff --git a/pcsd/views/permissions.erb b/pcsd/views/permissions.erb -index b02d9d3..1e38d7e 100644 ---- a/pcsd/views/permissions.erb -+++ b/pcsd/views/permissions.erb -@@ -16,7 +16,8 @@ - - - -- -+ -+ - - - <% @clusters.each do |c| %> -@@ -28,6 +29,11 @@ - -+ - --- -1.9.1 - diff --git a/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch b/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch deleted file mode 100644 index 569ebd8..0000000 --- a/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch +++ /dev/null @@ -1,89 +0,0 @@ -From 00ef3951514889791a11318124c271309d8b4958 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Fri, 4 Sep 2015 16:01:00 +0200 -Subject: [PATCH] check and refresh user auth info upon each request - ---- - pcs/cluster.py | 2 ++ - pcs/utils.py | 2 ++ - pcsd/auth.rb | 16 ++++++++++++---- - pcsd/test/test_auth.rb | 1 + - 4 files changed, 17 insertions(+), 4 deletions(-) - -diff --git a/pcs/cluster.py b/pcs/cluster.py -index d2a80a8..5a2128a 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -235,6 +235,8 @@ def auth_nodes_do(nodes, username, password, force, local): - 'local': local, - } - output, retval = utils.run_pcsdcli('auth', pcsd_data) -+ if retval == 0 and output['status'] == 'access_denied': -+ utils.err('Access denied') - if retval == 0 and output['status'] == 'ok' and output['data']: - failed = False - try: -diff --git a/pcs/utils.py b/pcs/utils.py -index c91b50e..757c159 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -803,6 +803,8 @@ def call_local_pcsd(argv, interactive_auth=False, std_in=None): - return [['Unable to communicate with pcsd'], 1, '', ''] - if output_json['status'] == 'bad_command': - return [['Command not allowed'], 1, '', ''] -+ if output_json['status'] == 'access_denied': -+ return [['Access denied'], 1, '', ''] - if output_json['status'] != "ok" or not output_json["data"]: - return [['Unable to communicate with pcsd'], 1, '', ''] - try: -diff --git a/pcsd/auth.rb b/pcsd/auth.rb -index 22d7868..53712ed 100644 ---- 
a/pcsd/auth.rb -+++ b/pcsd/auth.rb -@@ -19,7 +19,7 @@ class PCSAuth - - def self.validUser(username, password, generate_token = false) - $logger.info("Attempting login by '#{username}'") -- if not Rpam.auth(username,password, :service => "pcsd") -+ if not Rpam.auth(username, password, :service => "pcsd") - $logger.info("Failed login by '#{username}' (bad username or password)") - return nil - end -@@ -59,7 +59,7 @@ class PCSAuth - return [true, stdout.join(' ').split(nil)] - end - -- def self.isUserAllowedToLogin(username) -+ def self.isUserAllowedToLogin(username, log_success=true) - success, groups = getUsersGroups(username) - if not success - $logger.info( -@@ -73,7 +73,9 @@ class PCSAuth - ) - return false - end -- $logger.info("Successful login by '#{username}'") -+ if log_success -+ $logger.info("Successful login by '#{username}'") -+ end - return true - end - -@@ -131,7 +133,13 @@ class PCSAuth - end - - def self.isLoggedIn(session) -- return session[:username] != nil -+ username = session[:username] -+ if (username != nil) and isUserAllowedToLogin(username, false) -+ success, groups = getUsersGroups(username) -+ session[:usergroups] = success ? groups : [] -+ return true -+ end -+ return false - end - - def self.getSuperuserSession() --- -1.9.1 - diff --git a/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch b/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch deleted file mode 100644 index 8815b8f..0000000 --- a/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 25a4636078b869779cc6adfac3368a9fc382496d Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Mon, 7 Sep 2015 16:42:02 +0200 -Subject: [PATCH] fix checking user's group membership - ---- - pcsd/pcsd.rb | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index da47fb2..9a07ee8 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -120,8 +120,7 @@ $thread_cfgsync = Thread.new { - - helpers do - def protected! 
-- PCSAuth.loginByToken(session, cookies) if not PCSAuth.isLoggedIn(session) -- if not PCSAuth.isLoggedIn(session) -+ if not PCSAuth.loginByToken(session, cookies) and not PCSAuth.isLoggedIn(session) - # If we're on /managec//main we redirect - match_expr = "/managec/(.*)/(.*)" - mymatch = request.path.match(match_expr) --- -1.9.1 - diff --git a/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch b/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch deleted file mode 100644 index 0ef49d8..0000000 --- a/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch +++ /dev/null @@ -1,24 +0,0 @@ -From df10fbfd2673523f4cadac4be64cdf97ec9aba6c Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Wed, 12 Aug 2015 15:47:09 +0200 -Subject: [PATCH] improve logging in pcsd - ---- - pcsd/pcs.rb | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 6c7661a..1cddca8 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -359,6 +359,7 @@ def send_request_with_token(session, node, request, post=false, data={}, remote= - token = additional_tokens[node] || get_node_token(node) - $logger.info "SRWT Node: #{node} Request: #{request}" - if not token -+ $logger.error "Unable to connect to node #{node}, no token available" - return 400,'{"notoken":true}' - end - cookies_data = { --- -1.9.1 - diff --git a/SOURCES/bz1158577-02-fix-certificates-syncing.patch b/SOURCES/bz1158577-02-fix-certificates-syncing.patch deleted file mode 100644 index 21faec1..0000000 --- a/SOURCES/bz1158577-02-fix-certificates-syncing.patch +++ /dev/null @@ -1,554 +0,0 @@ -From 8363f06e73bba0a1d3f7d18cf5b1cde5b5080141 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 27 Aug 2015 14:29:21 +0200 -Subject: [PATCH] fix certificates syncing - ---- - pcs/cluster.py | 16 +++--- - pcs/pcsd.py | 107 ++++++++++++++++++++++++++-------------- - pcs/utils.py | 29 +++++++++++ - pcsd/pcs.rb | 153 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- - pcsd/pcsd.rb | 12 ++++- - pcsd/remote.rb | 12 +++-- - pcsd/ssl.rb | 26 ++++++++-- - 7 files changed, 292 insertions(+), 63 deletions(-) - -diff --git a/pcs/cluster.py b/pcs/cluster.py -index c982ffe..d2a80a8 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -345,13 +345,13 @@ def corosync_setup(argv,returnConfig=False): - sync_start(argv, primary_nodes) - if "--enable" in utils.pcs_options: - enable_cluster(primary_nodes) -- pcsd.pcsd_sync_certs([]) -+ pcsd.pcsd_sync_certs([], exit_after_error=False) - return - elif not returnConfig and not "--local" in utils.pcs_options:# and fedora_config: - sync(argv, primary_nodes) - if "--enable" in utils.pcs_options: - enable_cluster(primary_nodes) -- pcsd.pcsd_sync_certs([]) -+ pcsd.pcsd_sync_certs([], exit_after_error=False) - return - else: - nodes = argv[1:] -@@ -1190,15 +1190,17 @@ def cluster_node(argv): - - utils.setCorosyncConfig(node0, corosync_conf) - if "--enable" in utils.pcs_options: -- utils.enableCluster(node0) -+ retval, err = utils.enableCluster(node0) -+ if retval != 0: -+ print("Warning: enable cluster - {0}".format(err)) - if "--start" in utils.pcs_options or utils.is_rhel6(): - # always start new node on cman cluster - # otherwise it will get fenced -- utils.startCluster(node0) -+ retval, err = utils.startCluster(node0) -+ if retval != 0: -+ print("Warning: start cluster - {0}".format(err)) - -- pcsd_data = {'nodes': [node0]} -- utils.run_pcsdcli('send_local_certs', pcsd_data) -- utils.run_pcsdcli('pcsd_restart_nodes', pcsd_data) -+ pcsd.pcsd_sync_certs([node0], exit_after_error=False) - else: - utils.err("Unable 
to update any nodes") - output, retval = utils.reloadCorosync() -diff --git a/pcs/pcsd.py b/pcs/pcsd.py -index 6002c1a..b1b6be6 100644 ---- a/pcs/pcsd.py -+++ b/pcs/pcsd.py -@@ -36,14 +36,15 @@ def pcsd_certkey(argv): - try: - with open(certfile, 'r') as myfile: - cert = myfile.read() -- except IOError as e: -- utils.err(e) -- -- try: - with open(keyfile, 'r') as myfile: - key = myfile.read() - except IOError as e: - utils.err(e) -+ errors = utils.verify_cert_key_pair(cert, key) -+ if errors: -+ for err in errors: -+ utils.err(err, False) -+ sys.exit(1) - - if not "--force" in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)): - utils.err("certificate and/or key already exists, your must use --force to overwrite") -@@ -70,39 +71,71 @@ def pcsd_certkey(argv): - - print "Certificate and key updated, you may need to restart pcsd (service pcsd restart) for new settings to take effect" - --def pcsd_sync_certs(argv): -- nodes = utils.getNodesFromCorosyncConf() -- pcsd_data = {'nodes': nodes} -- commands = [ -- { -- "command": "send_local_certs", -- "message": "Synchronizing pcsd certificates on nodes {0}.".format( -- ", ".join(nodes) -- ), -- }, -- { -- "command": "pcsd_restart_nodes", -- "message": "Restaring pcsd on the nodes in order to reload " -- + "the certificates." -- , -- }, -- ] -- for cmd in commands: -- error = '' -- print cmd["message"] -- output, retval = utils.run_pcsdcli(cmd["command"], pcsd_data) -- if retval == 0 and output['status'] == 'ok' and output['data']: -- try: -- if output['data']['status'] != 'ok' and output['data']['text']: -- error = output['data']['text'] -- except KeyError: -- error = 'Unable to communicate with pcsd' -- else: -- error = 'Unable to sync pcsd certificates' -- if error: -- # restart pcsd even if sync failed in order to reload -- # the certificates on nodes where it succeded -- utils.err(error, False) -+def pcsd_sync_certs(argv, exit_after_error=True): -+ error = False -+ nodes_sync = argv if argv else utils.getNodesFromCorosyncConf() -+ nodes_restart = [] -+ -+ print("Synchronizing pcsd certificates on nodes {0}...".format( -+ ", ".join(nodes_sync) -+ )) -+ pcsd_data = { -+ "nodes": nodes_sync, -+ } -+ output, retval = utils.run_pcsdcli("send_local_certs", pcsd_data) -+ if retval == 0 and output["status"] == "ok" and output["data"]: -+ try: -+ sync_result = output["data"] -+ if sync_result["node_status"]: -+ for node, status in sync_result["node_status"].items(): -+ print("{0}: {1}".format(node, status["text"])) -+ if status["status"] == "ok": -+ nodes_restart.append(node) -+ else: -+ error = True -+ if sync_result["status"] != "ok": -+ error = True -+ utils.err(sync_result["text"], False) -+ if error and not nodes_restart: -+ if exit_after_error: -+ sys.exit(1) -+ else: -+ return -+ print -+ except (KeyError, AttributeError): -+ utils.err("Unable to communicate with pcsd", exit_after_error) -+ return -+ else: -+ utils.err("Unable to sync pcsd certificates", exit_after_error) -+ return -+ -+ print("Restaring pcsd on the nodes in order to reload the certificates...") -+ pcsd_data = { -+ "nodes": nodes_restart, -+ } -+ output, retval = utils.run_pcsdcli("pcsd_restart_nodes", pcsd_data) -+ if retval == 0 and output["status"] == "ok" and output["data"]: -+ try: -+ restart_result = output["data"] -+ if restart_result["node_status"]: -+ for node, status in restart_result["node_status"].items(): -+ print("{0}: {1}".format(node, status["text"])) -+ if status["status"] != "ok": -+ error = True 
-+ if restart_result["status"] != "ok": -+ error = True -+ utils.err(restart_result["text"], False) -+ if error: -+ if exit_after_error: -+ sys.exit(1) -+ else: -+ return -+ except (KeyError, AttributeError): -+ utils.err("Unable to communicate with pcsd", exit_after_error) -+ return -+ else: -+ utils.err("Unable to restart pcsd", exit_after_error) -+ return - - def pcsd_clear_auth(argv): - output = [] -diff --git a/pcs/utils.py b/pcs/utils.py -index 761723b..c91b50e 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -1880,6 +1880,35 @@ def is_iso8601_date(var): - output, retVal = run(["iso8601", "-d", var]) - return retVal == 0 - -+def verify_cert_key_pair(cert, key): -+ errors = [] -+ cert_modulus = "" -+ key_modulus = "" -+ -+ output, retval = run( -+ ["/usr/bin/openssl", "x509", "-modulus", "-noout"], -+ string_for_stdin=cert -+ ) -+ if retval != 0: -+ errors.append("Invalid certificate: {0}".format(output.strip())) -+ else: -+ cert_modulus = output.strip() -+ -+ output, retval = run( -+ ["/usr/bin/openssl", "rsa", "-modulus", "-noout"], -+ string_for_stdin=key -+ ) -+ if retval != 0: -+ errors.append("Invalid key: {0}".format(output.strip())) -+ else: -+ key_modulus = output.strip() -+ -+ if not errors and cert_modulus and key_modulus: -+ if cert_modulus != key_modulus: -+ errors.append("Certificate does not match the key") -+ -+ return errors -+ - # Does pacemaker consider a variable as true in cib? - # See crm_is_true in pacemaker/lib/common/utils.c - def is_cib_true(var): -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 1cddca8..37f6b83 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -1215,29 +1215,84 @@ def send_local_configs_to_nodes( - end - - def send_local_certs_to_nodes(session, nodes) -- data = { -- 'ssl_cert' => File.read(CRT_FILE), -- 'ssl_key' => File.read(KEY_FILE), -- 'cookie_secret' => File.read(COOKIE_FILE), -- } -+ begin -+ data = { -+ 'ssl_cert' => File.read(CRT_FILE), -+ 'ssl_key' => File.read(KEY_FILE), -+ 'cookie_secret' => File.read(COOKIE_FILE), -+ } -+ rescue => e -+ return { -+ 'status' => 'error', -+ 'text' => "Unable to read certificates: #{e}", -+ 'node_status' => {}, -+ } -+ end -+ -+ crt_errors = verify_cert_key_pair(data['ssl_cert'], data['ssl_key']) -+ if crt_errors and not crt_errors.empty? -+ return { -+ 'status' => 'error', -+ 'text' => "Invalid certificate and/or key: #{crt_errors.join}", -+ 'node_status' => {}, -+ } -+ end -+ secret_errors = verify_cookie_secret(data['cookie_secret']) -+ if secret_errors and not secret_errors.empty? -+ return { -+ 'status' => 'error', -+ 'text' => "Invalid cookie secret: #{secret_errors.join}", -+ 'node_status' => {}, -+ } -+ end -+ - node_response = {} - threads = [] - nodes.each { |node| - threads << Thread.new { -- code, _ = send_request_with_token(session, node, '/set_certs', true, data) -- node_response[node] = 200 == code ? 
'ok' : 'error' -+ code, response = send_request_with_token( -+ session, node, '/set_certs', true, data -+ ) -+ node_response[node] = [code, response] - } - } - threads.each { |t| t.join } - - node_error = [] -+ node_status = {} - node_response.each { |node, response| -- node_error << node if response != 'ok' -+ if response[0] == 200 -+ node_status[node] = { -+ 'status' => 'ok', -+ 'text' => 'Success', -+ } -+ else -+ text = response[1] -+ if response[0] == 401 -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ elsif response[0] == 400 -+ begin -+ parsed_response = JSON.parse(response[1], {:symbolize_names => true}) -+ if parsed_response[:noresponse] -+ text = "Unable to connect" -+ elsif parsed_response[:notoken] or parsed_response[:notauthorized] -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ end -+ rescue JSON::ParserError -+ end -+ end -+ node_status[node] = { -+ 'status' => 'error', -+ 'text' => text -+ } -+ node_error << node -+ end - } - return { - 'status' => node_error.empty?() ? 'ok' : 'error', - 'text' => node_error.empty?() ? 'Success' : \ - "Unable to save pcsd certificates to nodes: #{node_error.join(', ')}", -+ 'node_status' => node_status, - } - end - -@@ -1246,20 +1301,49 @@ def pcsd_restart_nodes(session, nodes) - threads = [] - nodes.each { |node| - threads << Thread.new { -- code, _ = send_request_with_token(session, node, '/pcsd_restart', true) -- node_response[node] = 200 == code ? 'ok' : 'error' -+ code, response = send_request_with_token( -+ session, node, '/pcsd_restart', true -+ ) -+ node_response[node] = [code, response] - } - } - threads.each { |t| t.join } - - node_error = [] -+ node_status = {} - node_response.each { |node, response| -- node_error << node if response != 'ok' -+ if response[0] == 200 -+ node_status[node] = { -+ 'status' => 'ok', -+ 'text' => 'Success', -+ } -+ else -+ text = response[1] -+ if response[0] == 401 -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ elsif response[0] == 400 -+ begin -+ parsed_response = JSON.parse(response[1], {:symbolize_names => true}) -+ if parsed_response[:noresponse] -+ text = "Unable to connect" -+ elsif parsed_response[:notoken] or parsed_response[:notauthorized] -+ text = "Unable to authenticate, try running 'pcs cluster auth'" -+ end -+ rescue JSON::ParserError -+ end -+ end -+ node_status[node] = { -+ 'status' => 'error', -+ 'text' => text -+ } -+ node_error << node -+ end - } - return { - 'status' => node_error.empty?() ? 'ok' : 'error', - 'text' => node_error.empty?() ? 'Success' : \ - "Unable to restart pcsd on nodes: #{node_error.join(', ')}", -+ 'node_status' => node_status, - } - end - -@@ -1280,6 +1364,53 @@ def write_file_lock(path, perm, data) - end - end - -+def verify_cert_key_pair(cert, key) -+ errors = [] -+ cert_modulus = nil -+ key_modulus = nil -+ -+ stdout, stderr, retval = run_cmd_options( -+ PCSAuth.getSuperuserSession(), -+ { -+ 'stdin' => cert, -+ }, -+ '/usr/bin/openssl', 'x509', '-modulus', '-noout' -+ ) -+ if retval != 0 -+ errors << "Invalid certificate: #{stderr.join}" -+ else -+ cert_modulus = stdout.join.strip -+ end -+ -+ stdout, stderr, retval = run_cmd_options( -+ PCSAuth.getSuperuserSession(), -+ { -+ 'stdin' => key, -+ }, -+ '/usr/bin/openssl', 'rsa', '-modulus', '-noout' -+ ) -+ if retval != 0 -+ errors << "Invalid key: #{stderr.join}" -+ else -+ key_modulus = stdout.join.strip -+ end -+ -+ if errors.empty? 
and cert_modulus and key_modulus -+ if cert_modulus != key_modulus -+ errors << 'Certificate does not match the key' -+ end -+ end -+ -+ return errors -+end -+ -+def verify_cookie_secret(secret) -+ if secret.empty? -+ return ['Cookie secret is empty'] -+ end -+ return [] -+end -+ - def cluster_status_from_nodes(session, cluster_nodes, cluster_name) - node_map = {} - forbidden_nodes = {} -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index 1f26fe5..da47fb2 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -25,10 +25,20 @@ Dir["wizards/*.rb"].each {|file| require file} - - use Rack::CommonLogger - -+def generate_cookie_secret -+ return SecureRandom.hex(30) -+end -+ - begin - secret = File.read(COOKIE_FILE) -+ secret_errors = verify_cookie_secret(secret) -+ if secret_errors and not secret_errors.empty? -+ secret_errors.each { |err| $logger.error err } -+ $logger.error "Invalid cookie secret, using temporary one" -+ secret = generate_cookie_secret() -+ end - rescue Errno::ENOENT -- secret = SecureRandom.hex(30) -+ secret = generate_cookie_secret() - File.open(COOKIE_FILE, 'w', 0700) {|f| f.write(secret)} - end - -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 4655756..22af38a 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -584,15 +584,19 @@ def set_certs(params, request, session) - return [400, 'cannot save ssl key without ssl certificate'] - end - if !ssl_cert.empty? and !ssl_key.empty? -+ ssl_errors = verify_cert_key_pair(ssl_cert, ssl_key) -+ if ssl_errors and !ssl_errors.empty? -+ return [400, ssl_errors.join] -+ end - begin - write_file_lock(CRT_FILE, 0700, ssl_cert) - write_file_lock(KEY_FILE, 0700, ssl_key) -- rescue -+ rescue => e - # clean the files if we ended in the middle - # the files will be regenerated on next pcsd start - FileUtils.rm(CRT_FILE, {:force => true}) - FileUtils.rm(KEY_FILE, {:force => true}) -- return [400, 'cannot save ssl files'] -+ return [400, "cannot save ssl files: #{e}"] - end - end - -@@ -601,8 +605,8 @@ def set_certs(params, request, session) - if !cookie_secret.empty? - begin - write_file_lock(COOKIE_FILE, 0700, cookie_secret) -- rescue -- return [400, 'cannot save cookie secret'] -+ rescue => e -+ return [400, "cannot save cookie secret: #{e}"] - end - end - end -diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb -index 02372f6..e948aef 100644 ---- a/pcsd/ssl.rb -+++ b/pcsd/ssl.rb -@@ -5,10 +5,12 @@ require 'openssl' - require 'rack' - - require 'bootstrap.rb' -+require 'pcs.rb' - - server_name = WEBrick::Utils::getservername -+$logger = configure_logger('/var/log/pcsd/pcsd.log') - --if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) -+def generate_cert_key_pair(server_name) - name = "/C=US/ST=MN/L=Minneapolis/O=pcsd/OU=pcsd/CN=#{server_name}" - ca = OpenSSL::X509::Name.parse(name) - key = OpenSSL::PKey::RSA.new(2048) -@@ -21,9 +23,27 @@ if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) - crt.not_before = Time.now - crt.not_after = Time.now + 10 * 365 * 24 * 60 * 60 # 10 year - crt.sign(key, OpenSSL::Digest::SHA256.new) -+ return crt, key -+end - -+if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) -+ crt, key = generate_cert_key_pair(server_name) - File.open(CRT_FILE, 'w',0700) {|f| f.write(crt)} - File.open(KEY_FILE, 'w',0700) {|f| f.write(key)} -+else -+ crt, key = nil, nil -+ begin -+ crt = File.read(CRT_FILE) -+ key = File.read(KEY_FILE) -+ rescue => e -+ $logger.error "Unable to read certificate or key: #{e}" -+ end -+ crt_errors = verify_cert_key_pair(crt, key) -+ if crt_errors and not crt_errors.empty? 
-+ crt_errors.each { |err| $logger.error err } -+ $logger.error "Invalid certificate and/or key, using temporary ones" -+ crt, key = generate_cert_key_pair(server_name) -+ end - end - - webrick_options = { -@@ -32,8 +52,8 @@ webrick_options = { - :Host => '::', - :SSLEnable => true, - :SSLVerifyClient => OpenSSL::SSL::VERIFY_NONE, -- :SSLCertificate => OpenSSL::X509::Certificate.new(File.open(CRT_FILE).read), -- :SSLPrivateKey => OpenSSL::PKey::RSA.new(File.open(KEY_FILE).read()), -+ :SSLCertificate => OpenSSL::X509::Certificate.new(crt), -+ :SSLPrivateKey => OpenSSL::PKey::RSA.new(key), - :SSLCertName => [[ "CN", server_name ]], - :SSLOptions => OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3, - } --- -1.9.1 - diff --git a/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch b/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch new file mode 100644 index 0000000..4f6eaaf --- /dev/null +++ b/SOURCES/bz1158805-01-add-support-for-qdevice-qnetd-provided-.patch @@ -0,0 +1,10043 @@ +From db8643c4489274faee0bba008846a63c2ab63f46 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Wed, 15 Jun 2016 14:52:39 +0200 +Subject: [PATCH] bz1158805-01-add support for qdevice-qnetd provided by + corosync + +--- + pcs/cli/common/lib_wrapper.py | 10 + + pcs/cluster.py | 119 +- + pcs/common/report_codes.py | 31 +- + pcs/lib/commands/qdevice.py | 88 +- + pcs/lib/commands/quorum.py | 217 +- + pcs/lib/corosync/config_facade.py | 98 +- + pcs/lib/corosync/live.py | 15 + + pcs/lib/corosync/qdevice_client.py | 93 + + pcs/lib/corosync/qdevice_net.py | 314 ++- + pcs/lib/env.py | 11 +- + pcs/lib/errors.py | 6 +- + pcs/lib/external.py | 44 +- + pcs/lib/nodes_task.py | 69 +- + pcs/lib/reports.py | 225 +- + pcs/pcs.8 | 27 +- + pcs/qdevice.py | 71 + + pcs/quorum.py | 34 +- + pcs/settings_default.py | 6 +- + pcs/test/resources/qdevice-certs/qnetd-cacert.crt | 1 + + pcs/test/test_lib_commands_qdevice.py | 255 ++ + pcs/test/test_lib_commands_quorum.py | 1109 ++++++++- + pcs/test/test_lib_corosync_config_facade.py | 367 ++- + pcs/test/test_lib_corosync_live.py | 62 +- + pcs/test/test_lib_corosync_qdevice_client.py | 60 + + pcs/test/test_lib_corosync_qdevice_net.py | 965 +++++++- + pcs/test/test_lib_env.py | 142 +- + pcs/test/test_lib_external.py | 126 +- + pcs/test/test_lib_nodes_task.py | 168 +- + pcs/test/test_quorum.py | 9 +- + pcs/test/test_utils.py | 2628 +++++++++++---------- + pcs/usage.py | 53 +- + pcs/utils.py | 147 +- + pcsd/pcs.rb | 17 + + pcsd/remote.rb | 163 +- + pcsd/settings.rb | 6 + + pcsd/settings.rb.debian | 10 +- + 36 files changed, 6170 insertions(+), 1596 deletions(-) + create mode 100644 pcs/lib/corosync/qdevice_client.py + create mode 100644 pcs/test/resources/qdevice-certs/qnetd-cacert.crt + create mode 100644 pcs/test/test_lib_corosync_qdevice_client.py + +diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py +index 2ba5602..2dd5810 100644 +--- a/pcs/cli/common/lib_wrapper.py ++++ b/pcs/cli/common/lib_wrapper.py +@@ -117,6 +117,8 @@ def load_module(env, middleware_factory, name): + "get_config": quorum.get_config, + "remove_device": quorum.remove_device, + "set_options": quorum.set_options, ++ "status": quorum.status_text, ++ "status_device": quorum.status_device_text, + "update_device": quorum.update_device, + } + ) +@@ -125,6 +127,7 @@ def load_module(env, middleware_factory, name): + env, + middleware.build(), + { ++ "status": qdevice.qdevice_status_text, + "setup": qdevice.qdevice_setup, + "destroy": qdevice.qdevice_destroy, + "start": 
qdevice.qdevice_start, +@@ -132,6 +135,13 @@ def load_module(env, middleware_factory, name): + "kill": qdevice.qdevice_kill, + "enable": qdevice.qdevice_enable, + "disable": qdevice.qdevice_disable, ++ # following commands are internal use only, called from pcsd ++ "client_net_setup": qdevice.client_net_setup, ++ "client_net_import_certificate": ++ qdevice.client_net_import_certificate, ++ "client_net_destroy": qdevice.client_net_destroy, ++ "sign_net_cert_request": ++ qdevice.qdevice_net_sign_certificate_request, + } + ) + if name == "sbd": +diff --git a/pcs/cluster.py b/pcs/cluster.py +index 002b5c5..988ab75 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -36,23 +36,29 @@ from pcs import ( + ) + from pcs.utils import parallel_for_nodes + from pcs.common import report_codes ++from pcs.cli.common.reports import process_library_reports + from pcs.lib import ( + pacemaker as lib_pacemaker, + sbd as lib_sbd, + reports as lib_reports, + ) +-from pcs.lib.tools import environment_file_to_dict ++from pcs.lib.commands.quorum import _add_device_model_net ++from pcs.lib.corosync import ( ++ config_parser as corosync_conf_utils, ++ qdevice_net, ++) ++from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade ++from pcs.lib.errors import ( ++ LibraryError, ++ ReportItemSeverity, ++) + from pcs.lib.external import ( + disable_service, + NodeCommunicationException, + node_communicator_exception_to_report_item, + ) + from pcs.lib.node import NodeAddresses +-from pcs.lib.errors import ( +- LibraryError, +- ReportItemSeverity, +-) +-from pcs.lib.corosync import config_parser as corosync_conf_utils ++from pcs.lib.tools import environment_file_to_dict + + def cluster_cmd(argv): + if len(argv) == 0: +@@ -288,7 +294,7 @@ def cluster_setup(argv): + ) + if udpu_rrp and "rrp_mode" not in options["transport_options"]: + options["transport_options"]["rrp_mode"] = "passive" +- utils.process_library_reports(messages) ++ process_library_reports(messages) + + # prepare config file + if is_rhel6: +@@ -306,7 +312,7 @@ def cluster_setup(argv): + options["totem_options"], + options["quorum_options"] + ) +- utils.process_library_reports(messages) ++ process_library_reports(messages) + + # setup on the local node + if "--local" in utils.pcs_options: +@@ -870,6 +876,7 @@ def start_cluster(argv): + return + + print("Starting Cluster...") ++ service_list = [] + if utils.is_rhel6(): + # Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0 + retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]') +@@ -882,14 +889,15 @@ def start_cluster(argv): + print(output) + utils.err("unable to start cman") + else: +- output, retval = utils.run(["service", "corosync","start"]) ++ service_list.append("corosync") ++ if utils.need_to_handle_qdevice_service(): ++ service_list.append("corosync-qdevice") ++ service_list.append("pacemaker") ++ for service in service_list: ++ output, retval = utils.run(["service", service, "start"]) + if retval != 0: + print(output) +- utils.err("unable to start corosync") +- output, retval = utils.run(["service", "pacemaker", "start"]) +- if retval != 0: +- print(output) +- utils.err("unable to start pacemaker") ++ utils.err("unable to start {0}".format(service)) + if wait: + wait_for_nodes_started([], wait_timeout) + +@@ -1035,14 +1043,20 @@ def enable_cluster(argv): + enable_cluster_nodes(argv) + return + +- utils.enableServices() ++ try: ++ utils.enableServices() ++ except LibraryError as e: ++ process_library_reports(e.args) + + 
def disable_cluster(argv): + if len(argv) > 0: + disable_cluster_nodes(argv) + return + +- utils.disableServices() ++ try: ++ utils.disableServices() ++ except LibraryError as e: ++ process_library_reports(e.args) + + def enable_cluster_all(): + enable_cluster_nodes(utils.getNodesFromCorosyncConf()) +@@ -1132,13 +1146,18 @@ def stop_cluster_corosync(): + utils.err("unable to stop cman") + else: + print("Stopping Cluster (corosync)...") +- output, retval = utils.run(["service", "corosync","stop"]) +- if retval != 0: +- print(output) +- utils.err("unable to stop corosync") ++ service_list = [] ++ if utils.need_to_handle_qdevice_service(): ++ service_list.append("corosync-qdevice") ++ service_list.append("corosync") ++ for service in service_list: ++ output, retval = utils.run(["service", service, "stop"]) ++ if retval != 0: ++ print(output) ++ utils.err("unable to stop {0}".format(service)) + + def kill_cluster(argv): +- daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync"] ++ daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"] + dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons) + # if dummy_retval != 0: + # print "Error: unable to execute killall -9" +@@ -1321,19 +1340,16 @@ def cluster_node(argv): + "cluster is not configured for RRP, " + "you must not specify ring 1 address for the node" + ) +- utils.check_qdevice_algorithm_and_running_cluster( +- utils.getCorosyncConf(), add=True +- ) + corosync_conf = None + (canAdd, error) = utils.canAddNodeToCluster(node0) + if not canAdd: + utils.err("Unable to add '%s' to cluster: %s" % (node0, error)) + ++ lib_env = utils.get_lib_env() ++ report_processor = lib_env.report_processor ++ node_communicator = lib_env.node_communicator() ++ node_addr = NodeAddresses(node0, node1) + try: +- node_addr = NodeAddresses(node0, node1) +- lib_env = utils.get_lib_env() +- report_processor = lib_env.report_processor +- node_communicator = lib_env.node_communicator() + if lib_sbd.is_sbd_enabled(utils.cmd_runner()): + if "--watchdog" not in utils.pcs_options: + watchdog = settings.sbd_watchdog_default +@@ -1367,9 +1383,9 @@ def cluster_node(argv): + report_processor, node_communicator, node_addr + ) + except LibraryError as e: +- utils.process_library_reports(e.args) ++ process_library_reports(e.args) + except NodeCommunicationException as e: +- utils.process_library_reports( ++ process_library_reports( + [node_communicator_exception_to_report_item(e)] + ) + +@@ -1383,6 +1399,8 @@ def cluster_node(argv): + else: + print("%s: Corosync updated" % my_node) + corosync_conf = output ++ # corosync.conf must be reloaded before the new node is started ++ output, retval = utils.reloadCorosync() + if corosync_conf != None: + # send local cluster pcsd configs to the new node + # may be used for sending corosync config as well in future +@@ -1406,6 +1424,25 @@ def cluster_node(argv): + except: + utils.err('Unable to communicate with pcsd') + ++ # set qdevice-net certificates if needed ++ if not utils.is_rhel6(): ++ try: ++ conf_facade = corosync_conf_facade.from_string( ++ corosync_conf ++ ) ++ qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings() ++ if qdevice_model == "net": ++ _add_device_model_net( ++ lib_env, ++ qdevice_model_options["host"], ++ conf_facade.get_cluster_name(), ++ [node_addr], ++ skip_offline_nodes=False ++ ) ++ except LibraryError as e: ++ process_library_reports(e.args) ++ ++ print("Setting up corosync...") + 
utils.setCorosyncConfig(node0, corosync_conf) + if "--enable" in utils.pcs_options: + retval, err = utils.enableCluster(node0) +@@ -1421,7 +1458,6 @@ def cluster_node(argv): + pcsd.pcsd_sync_certs([node0], exit_after_error=False) + else: + utils.err("Unable to update any nodes") +- output, retval = utils.reloadCorosync() + if utils.is_cman_with_udpu_transport(): + print("Warning: Using udpu transport on a CMAN cluster, " + + "cluster restart is required to apply node addition") +@@ -1433,9 +1469,6 @@ def cluster_node(argv): + utils.err( + "node '%s' does not appear to exist in configuration" % node0 + ) +- utils.check_qdevice_algorithm_and_running_cluster( +- utils.getCorosyncConf(), add=False +- ) + if "--force" not in utils.pcs_options: + retval, data = utils.get_remote_quorumtool_output(node0) + if retval != 0: +@@ -1697,10 +1730,18 @@ def cluster_destroy(argv): + else: + print("Shutting down pacemaker/corosync services...") + os.system("service pacemaker stop") ++ # returns error if qdevice is not running, it is safe to ignore it ++ # since we want it not to be running ++ os.system("service corosync-qdevice stop") + os.system("service corosync stop") + print("Killing any remaining services...") +- os.system("killall -q -9 corosync aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld") +- utils.disableServices() ++ os.system("killall -q -9 corosync corosync-qdevice aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld") ++ try: ++ utils.disableServices() ++ except: ++ # previously errors were suppressed in here, let's keep it that way ++ # for now ++ pass + try: + disable_service(utils.cmd_runner(), "sbd") + except: +@@ -1716,6 +1757,12 @@ def cluster_destroy(argv): + "pe*.bz2","cib.*"] + for name in state_files: + os.system("find /var/lib -name '"+name+"' -exec rm -f \{\} \;") ++ try: ++ qdevice_net.client_destroy() ++ except: ++ # errors from deleting other files are suppressed as well ++ # we do not want to fail if qdevice was not set up ++ pass + + def cluster_verify(argv): + nofilename = True +diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py +index bda982a..afe0554 100644 +--- a/pcs/common/report_codes.py ++++ b/pcs/common/report_codes.py +@@ -45,6 +45,8 @@ COROSYNC_CONFIG_RELOAD_ERROR = "COROSYNC_CONFIG_RELOAD_ERROR" + COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED" + COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR" + COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE" ++COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE" ++COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR" + COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE" + CRM_MON_ERROR = "CRM_MON_ERROR" + DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST" +@@ -62,11 +64,11 @@ INVALID_SCORE = "INVALID_SCORE" + INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE" + MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS" + NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL" +-NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR", +-NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED", +-NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED", +-NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT", 
+-NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND", ++NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR" ++NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED" ++NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED" ++NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT" ++NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND" + NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED" + NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED" + NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED" +@@ -74,16 +76,25 @@ NODE_NOT_FOUND = "NODE_NOT_FOUND" + NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH' + OMITTING_NODE = "OMITTING_NODE" + PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND" +-PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE", +-PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF", +-PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE", ++PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE" ++PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF" ++PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE" + QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED" + QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED" ++QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE = "QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE" ++QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED = "QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED" ++QDEVICE_CERTIFICATE_REMOVAL_STARTED = "QDEVICE_CERTIFICATE_REMOVAL_STARTED" ++QDEVICE_CERTIFICATE_REMOVED_FROM_NODE = "QDEVICE_CERTIFICATE_REMOVED_FROM_NODE" ++QDEVICE_CERTIFICATE_IMPORT_ERROR = "QDEVICE_CERTIFICATE_IMPORT_ERROR" ++QDEVICE_CERTIFICATE_SIGN_ERROR = "QDEVICE_CERTIFICATE_SIGN_ERROR" + QDEVICE_DESTROY_ERROR = "QDEVICE_DESTROY_ERROR" + QDEVICE_DESTROY_SUCCESS = "QDEVICE_DESTROY_SUCCESS" ++QDEVICE_GET_STATUS_ERROR = "QDEVICE_GET_STATUS_ERROR" + QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR" + QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS" + QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED" ++QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED" ++QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED" + QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED" + REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING" + RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR" +@@ -106,12 +117,16 @@ SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED" + SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED" + SBD_NOT_ENABLED = "SBD_NOT_ENABLED" + SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR" ++SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED" + SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS" + SERVICE_ENABLE_ERROR = "SERVICE_ENABLE_ERROR" ++SERVICE_ENABLE_STARTED = "SERVICE_ENABLE_STARTED" ++SERVICE_ENABLE_SKIPPED = "SERVICE_ENABLE_SKIPPED" + SERVICE_ENABLE_SUCCESS = "SERVICE_ENABLE_SUCCESS" + SERVICE_KILL_ERROR = "SERVICE_KILL_ERROR" + SERVICE_KILL_SUCCESS = "SERVICE_KILL_SUCCESS" + SERVICE_START_ERROR = "SERVICE_START_ERROR" ++SERVICE_START_SKIPPED = "SERVICE_START_SKIPPED" + SERVICE_START_STARTED = "SERVICE_START_STARTED" + SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS" + SERVICE_STOP_ERROR = 
"SERVICE_STOP_ERROR" +diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py +index c300a4c..1d1d85f 100644 +--- a/pcs/lib/commands/qdevice.py ++++ b/pcs/lib/commands/qdevice.py +@@ -5,6 +5,9 @@ from __future__ import ( + unicode_literals, + ) + ++import base64 ++import binascii ++ + from pcs.lib import external, reports + from pcs.lib.corosync import qdevice_net + from pcs.lib.errors import LibraryError +@@ -31,7 +34,7 @@ def qdevice_setup(lib_env, model, enable, start): + def qdevice_destroy(lib_env, model): + """ + Stop and disable qdevice on local host and remove its configuration +- string model qdevice model to initialize ++ string model qdevice model to destroy + """ + _ensure_not_cman(lib_env) + _check_model(model) +@@ -40,6 +43,22 @@ def qdevice_destroy(lib_env, model): + qdevice_net.qdevice_destroy() + lib_env.report_processor.process(reports.qdevice_destroy_success(model)) + ++def qdevice_status_text(lib_env, model, verbose=False, cluster=None): ++ """ ++ Get runtime status of a quorum device in plain text ++ string model qdevice model to query ++ bool verbose get more detailed output ++ string cluster show information only about specified cluster ++ """ ++ _ensure_not_cman(lib_env) ++ _check_model(model) ++ runner = lib_env.cmd_runner() ++ return ( ++ qdevice_net.qdevice_status_generic_text(runner, verbose) ++ + ++ qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose) ++ ) ++ + def qdevice_enable(lib_env, model): + """ + make qdevice start automatically on boot on local host +@@ -80,6 +99,73 @@ def qdevice_kill(lib_env, model): + _check_model(model) + _service_kill(lib_env, qdevice_net.qdevice_kill) + ++def qdevice_net_sign_certificate_request( ++ lib_env, certificate_request, cluster_name ++): ++ """ ++ Sign node certificate request by qnetd CA ++ string certificate_request base64 encoded certificate request ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ _ensure_not_cman(lib_env) ++ try: ++ certificate_request_data = base64.b64decode(certificate_request) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_option_value( ++ "qnetd certificate request", ++ certificate_request, ++ ["base64 encoded certificate"] ++ )) ++ return base64.b64encode( ++ qdevice_net.qdevice_sign_certificate_request( ++ lib_env.cmd_runner(), ++ certificate_request_data, ++ cluster_name ++ ) ++ ) ++ ++def client_net_setup(lib_env, ca_certificate): ++ """ ++ Intialize qdevice net client on local host ++ ca_certificate base64 encoded qnetd CA certificate ++ """ ++ _ensure_not_cman(lib_env) ++ try: ++ ca_certificate_data = base64.b64decode(ca_certificate) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_option_value( ++ "qnetd CA certificate", ++ ca_certificate, ++ ["base64 encoded certificate"] ++ )) ++ qdevice_net.client_setup(lib_env.cmd_runner(), ca_certificate_data) ++ ++def client_net_import_certificate(lib_env, certificate): ++ """ ++ Import qnetd client certificate to local node certificate storage ++ certificate base64 encoded qnetd client certificate ++ """ ++ _ensure_not_cman(lib_env) ++ try: ++ certificate_data = base64.b64decode(certificate) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_option_value( ++ "qnetd client certificate", ++ certificate, ++ ["base64 encoded certificate"] ++ )) ++ qdevice_net.client_import_certificate_and_key( ++ lib_env.cmd_runner(), ++ certificate_data ++ ) ++ ++def client_net_destroy(lib_env): ++ """ ++ delete qdevice 
client config files on local host ++ """ ++ _ensure_not_cman(lib_env) ++ qdevice_net.client_destroy() ++ + def _ensure_not_cman(lib_env): + if lib_env.is_cman_cluster: + raise LibraryError(reports.cman_unsupported_command()) +diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py +index 1ee5411..aa00bbd 100644 +--- a/pcs/lib/commands/quorum.py ++++ b/pcs/lib/commands/quorum.py +@@ -5,9 +5,18 @@ from __future__ import ( + unicode_literals, + ) + +- + from pcs.lib import reports + from pcs.lib.errors import LibraryError ++from pcs.lib.corosync import ( ++ live as corosync_live, ++ qdevice_net, ++ qdevice_client ++) ++from pcs.lib.external import ( ++ NodeCommunicationException, ++ node_communicator_exception_to_report_item, ++ parallel_nodes_communication_helper, ++) + + + def get_config(lib_env): +@@ -42,6 +51,21 @@ def set_options(lib_env, options, skip_offline_nodes=False): + cfg.set_quorum_options(lib_env.report_processor, options) + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + ++def status_text(lib_env): ++ """ ++ Get quorum runtime status in plain text ++ """ ++ __ensure_not_cman(lib_env) ++ return corosync_live.get_quorum_status_text(lib_env.cmd_runner()) ++ ++def status_device_text(lib_env, verbose=False): ++ """ ++ Get quorum device client runtime status in plain text ++ bool verbose get more detailed output ++ """ ++ __ensure_not_cman(lib_env) ++ return qdevice_client.get_status_text(lib_env.cmd_runner(), verbose) ++ + def add_device( + lib_env, model, model_options, generic_options, force_model=False, + force_options=False, skip_offline_nodes=False +@@ -58,6 +82,8 @@ def add_device( + __ensure_not_cman(lib_env) + + cfg = lib_env.get_corosync_conf() ++ # Try adding qdevice to corosync.conf. This validates all the options and ++ # makes sure qdevice is not defined in corosync.conf yet. + cfg.add_quorum_device( + lib_env.report_processor, + model, +@@ -66,9 +92,131 @@ def add_device( + force_model, + force_options + ) +- # TODO validation, verification, certificates, etc. ++ ++ # First setup certificates for qdevice, then send corosync.conf to nodes. ++ # If anything fails, nodes will not have corosync.conf with qdevice in it, ++ # so there is no effect on the cluster. ++ if lib_env.is_corosync_conf_live: ++ # do model specific configuration ++ # if model is not known to pcs and was forced, do not configure antyhing ++ # else but corosync.conf, as we do not know what to do anyways ++ if model == "net": ++ _add_device_model_net( ++ lib_env, ++ # we are sure it's there, it was validated in add_quorum_device ++ model_options["host"], ++ cfg.get_cluster_name(), ++ cfg.get_nodes(), ++ skip_offline_nodes ++ ) ++ ++ lib_env.report_processor.process( ++ reports.service_enable_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_enable, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ ++ # everything set up, it's safe to tell the nodes to use qdevice + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + ++ # Now, when corosync.conf has been reloaded, we can start qdevice service. 
++ if lib_env.is_corosync_conf_live: ++ lib_env.report_processor.process( ++ reports.service_start_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_start, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ ++def _add_device_model_net( ++ lib_env, qnetd_host, cluster_name, cluster_nodes, skip_offline_nodes ++): ++ """ ++ setup cluster nodes for using qdevice model net ++ string qnetd_host address of qdevice provider (qnetd host) ++ string cluster_name name of the cluster to which qdevice is being added ++ NodeAddressesList cluster_nodes list of cluster nodes addresses ++ bool skip_offline_nodes continue even if not all nodes are accessible ++ """ ++ communicator = lib_env.node_communicator() ++ runner = lib_env.cmd_runner() ++ reporter = lib_env.report_processor ++ ++ reporter.process( ++ reports.qdevice_certificate_distribution_started() ++ ) ++ # get qnetd CA certificate ++ try: ++ qnetd_ca_cert = qdevice_net.remote_qdevice_get_ca_certificate( ++ communicator, ++ qnetd_host ++ ) ++ except NodeCommunicationException as e: ++ raise LibraryError( ++ node_communicator_exception_to_report_item(e) ++ ) ++ # init certificate storage on all nodes ++ parallel_nodes_communication_helper( ++ qdevice_net.remote_client_setup, ++ [ ++ ((communicator, node, qnetd_ca_cert), {}) ++ for node in cluster_nodes ++ ], ++ reporter, ++ skip_offline_nodes ++ ) ++ # create client certificate request ++ cert_request = qdevice_net.client_generate_certificate_request( ++ runner, ++ cluster_name ++ ) ++ # sign the request on qnetd host ++ try: ++ signed_certificate = qdevice_net.remote_sign_certificate_request( ++ communicator, ++ qnetd_host, ++ cert_request, ++ cluster_name ++ ) ++ except NodeCommunicationException as e: ++ raise LibraryError( ++ node_communicator_exception_to_report_item(e) ++ ) ++ # transform the signed certificate to pk12 format which can sent to nodes ++ pk12 = qdevice_net.client_cert_request_to_pk12(runner, signed_certificate) ++ # distribute final certificate to nodes ++ def do_and_report(reporter, communicator, node, pk12): ++ qdevice_net.remote_client_import_certificate_and_key( ++ communicator, node, pk12 ++ ) ++ reporter.process( ++ reports.qdevice_certificate_accepted_by_node(node.label) ++ ) ++ parallel_nodes_communication_helper( ++ do_and_report, ++ [ ++ ((reporter, communicator, node, pk12), {}) ++ for node in cluster_nodes ++ ], ++ reporter, ++ skip_offline_nodes ++ ) ++ + def update_device( + lib_env, model_options, generic_options, force_options=False, + skip_offline_nodes=False +@@ -98,9 +246,74 @@ def remove_device(lib_env, skip_offline_nodes=False): + __ensure_not_cman(lib_env) + + cfg = lib_env.get_corosync_conf() ++ model, dummy_options, dummy_options = cfg.get_quorum_device_settings() + cfg.remove_quorum_device() + lib_env.push_corosync_conf(cfg, skip_offline_nodes) + ++ if lib_env.is_corosync_conf_live: ++ # disable qdevice ++ lib_env.report_processor.process( ++ reports.service_disable_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_disable, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ # stop qdevice ++ lib_env.report_processor.process( ++ 
reports.service_stop_started("corosync-qdevice") ++ ) ++ communicator = lib_env.node_communicator() ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_stop, ++ [ ++ [(lib_env.report_processor, communicator, node), {}] ++ for node in cfg.get_nodes() ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ # handle model specific configuration ++ if model == "net": ++ _remove_device_model_net( ++ lib_env, ++ cfg.get_nodes(), ++ skip_offline_nodes ++ ) ++ ++def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes): ++ """ ++ remove configuration used by qdevice model net ++ NodeAddressesList cluster_nodes list of cluster nodes addresses ++ bool skip_offline_nodes continue even if not all nodes are accessible ++ """ ++ reporter = lib_env.report_processor ++ communicator = lib_env.node_communicator() ++ ++ reporter.process( ++ reports.qdevice_certificate_removal_started() ++ ) ++ def do_and_report(reporter, communicator, node): ++ qdevice_net.remote_client_destroy(communicator, node) ++ reporter.process( ++ reports.qdevice_certificate_removed_from_node(node.label) ++ ) ++ parallel_nodes_communication_helper( ++ do_and_report, ++ [ ++ [(reporter, communicator, node), {}] ++ for node in cluster_nodes ++ ], ++ lib_env.report_processor, ++ skip_offline_nodes ++ ) ++ + def __ensure_not_cman(lib_env): + if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster: + raise LibraryError(reports.cman_unsupported_command()) +diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py +index 5a486ca..600a89b 100644 +--- a/pcs/lib/corosync/config_facade.py ++++ b/pcs/lib/corosync/config_facade.py +@@ -22,6 +22,12 @@ class ConfigFacade(object): + "last_man_standing_window", + "wait_for_all", + ) ++ QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = ( ++ "auto_tie_breaker", ++ "last_man_standing", ++ "last_man_standing_window", ++ ) ++ + + @classmethod + def from_string(cls, config_string): +@@ -52,6 +58,8 @@ class ConfigFacade(object): + self._config = parsed_config + # set to True if changes cannot be applied on running cluster + self._need_stopped_cluster = False ++ # set to True if qdevice reload is required to apply changes ++ self._need_qdevice_reload = False + + @property + def config(self): +@@ -61,6 +69,17 @@ class ConfigFacade(object): + def need_stopped_cluster(self): + return self._need_stopped_cluster + ++ @property ++ def need_qdevice_reload(self): ++ return self._need_qdevice_reload ++ ++ def get_cluster_name(self): ++ cluster_name = "" ++ for totem in self.config.get_sections("totem"): ++ for attrs in totem.get_attributes("cluster_name"): ++ cluster_name = attrs[1] ++ return cluster_name ++ + def get_nodes(self): + """ + Get all defined nodes +@@ -112,8 +131,9 @@ class ConfigFacade(object): + + def __validate_quorum_options(self, options): + report_items = [] ++ has_qdevice = self.has_quorum_device() ++ qdevice_incompatible_options = [] + for name, value in sorted(options.items()): +- + allowed_names = self.__class__.QUORUM_OPTIONS + if name not in allowed_names: + report_items.append( +@@ -124,6 +144,13 @@ class ConfigFacade(object): + if value == "": + continue + ++ if ( ++ has_qdevice ++ and ++ name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE ++ ): ++ qdevice_incompatible_options.append(name) ++ + if name == "last_man_standing_window": + if not value.isdigit(): + report_items.append(reports.invalid_option_value( +@@ -137,6 +164,13 @@ class ConfigFacade(object): + name, value, allowed_values + )) + ++ if 
qdevice_incompatible_options: ++ report_items.append( ++ reports.corosync_options_incompatible_with_qdevice( ++ qdevice_incompatible_options ++ ) ++ ) ++ + return report_items + + def has_quorum_device(self): +@@ -201,13 +235,13 @@ class ConfigFacade(object): + force=force_options + ) + ) ++ + # configuration cleanup +- remove_need_stopped_cluster = { +- "auto_tie_breaker": "", +- "last_man_standing": "", +- "last_man_standing_window": "", +- } +- need_stopped_cluster = False ++ remove_need_stopped_cluster = dict([ ++ (name, "") ++ for name in self.__class__.QUORUM_OPTIONS_INCOMPATIBLE_WITH_QDEVICE ++ ]) ++ # remove old device settings + quorum_section_list = self.__ensure_section(self.config, "quorum") + for quorum in quorum_section_list: + for device in quorum.get_sections("device"): +@@ -218,13 +252,19 @@ class ConfigFacade(object): + and + value not in ["", "0"] + ): +- need_stopped_cluster = True ++ self._need_stopped_cluster = True ++ # remove conflicting quorum options + attrs_to_remove = { + "allow_downscale": "", + "two_node": "", + } + attrs_to_remove.update(remove_need_stopped_cluster) + self.__set_section_options(quorum_section_list, attrs_to_remove) ++ # remove nodes' votes ++ for nodelist in self.config.get_sections("nodelist"): ++ for node in nodelist.get_sections("node"): ++ node.del_attributes_by_name("quorum_votes") ++ + # add new configuration + quorum = quorum_section_list[-1] + new_device = config_parser.Section("device") +@@ -234,12 +274,9 @@ class ConfigFacade(object): + new_model = config_parser.Section(model) + self.__set_section_options([new_model], model_options) + new_device.add_section(new_model) ++ self.__update_qdevice_votes() + self.__update_two_node() + self.__remove_empty_sections(self.config) +- # update_two_node sets self._need_stopped_cluster when changing an +- # algorithm lms <-> 2nodelms. We don't care about that, it's not really +- # a change, as there was no qdevice before. So we override it. 
+- self._need_stopped_cluster = need_stopped_cluster + + def update_quorum_device( + self, report_processor, model_options, generic_options, +@@ -281,9 +318,10 @@ class ConfigFacade(object): + model_sections.extend(device.get_sections(model)) + self.__set_section_options(device_sections, generic_options) + self.__set_section_options(model_sections, model_options) ++ self.__update_qdevice_votes() + self.__update_two_node() + self.__remove_empty_sections(self.config) +- self._need_stopped_cluster = True ++ self._need_qdevice_reload = True + + def remove_quorum_device(self): + """ +@@ -369,7 +407,7 @@ class ConfigFacade(object): + continue + + if name == "algorithm": +- allowed_values = ("2nodelms", "ffsplit", "lms") ++ allowed_values = ("ffsplit", "lms") + if value not in allowed_values: + report_items.append(reports.invalid_option_value( + name, value, allowed_values, severity, forceable +@@ -461,19 +499,29 @@ class ConfigFacade(object): + else: + for quorum in self.config.get_sections("quorum"): + quorum.del_attributes_by_name("two_node") +- # update qdevice algorithm "lms" vs "2nodelms" ++ ++ def __update_qdevice_votes(self): ++ # ffsplit won't start if votes is missing or not set to 1 ++ # for other algorithms it's required not to put votes at all ++ model = None ++ algorithm = None ++ device_sections = [] + for quorum in self.config.get_sections("quorum"): + for device in quorum.get_sections("device"): +- for net in device.get_sections("net"): +- algorithm = None +- for dummy_name, value in net.get_attributes("algorithm"): +- algorithm = value +- if algorithm == "lms" and has_two_nodes: +- net.set_attribute("algorithm", "2nodelms") +- self._need_stopped_cluster = True +- elif algorithm == "2nodelms" and not has_two_nodes: +- net.set_attribute("algorithm", "lms") +- self._need_stopped_cluster = True ++ device_sections.append(device) ++ for dummy_name, value in device.get_attributes("model"): ++ model = value ++ for device in device_sections: ++ for model_section in device.get_sections(model): ++ for dummy_name, value in model_section.get_attributes( ++ "algorithm" ++ ): ++ algorithm = value ++ if model == "net": ++ if algorithm == "ffsplit": ++ self.__set_section_options(device_sections, {"votes": "1"}) ++ else: ++ self.__set_section_options(device_sections, {"votes": ""}) + + def __set_section_options(self, section_list, options): + for section in section_list[:-1]: +diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py +index 2446a46..4129aeb 100644 +--- a/pcs/lib/corosync/live.py ++++ b/pcs/lib/corosync/live.py +@@ -47,3 +47,18 @@ def reload_config(runner): + reports.corosync_config_reload_error(output.rstrip()) + ) + ++def get_quorum_status_text(runner): ++ """ ++ Get runtime quorum status from the local node ++ """ ++ output, retval = runner.run([ ++ os.path.join(settings.corosync_binaries, "corosync-quorumtool"), ++ "-p" ++ ]) ++ # retval is 0 on success if node is not in partition with quorum ++ # retval is 1 on error OR on success if node has quorum ++ if retval not in [0, 1]: ++ raise LibraryError( ++ reports.corosync_quorum_get_status_error(output) ++ ) ++ return output +diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py +new file mode 100644 +index 0000000..98fbb0e +--- /dev/null ++++ b/pcs/lib/corosync/qdevice_client.py +@@ -0,0 +1,93 @@ ++from __future__ import ( ++ absolute_import, ++ division, ++ print_function, ++ unicode_literals, ++) ++ ++import os.path ++ ++from pcs import settings ++from pcs.lib import reports ++from 
pcs.lib.errors import LibraryError ++ ++ ++def get_status_text(runner, verbose=False): ++ """ ++ Get quorum device client runtime status in plain text ++ bool verbose get more detailed output ++ """ ++ cmd = [ ++ os.path.join(settings.corosync_binaries, "corosync-qdevice-tool"), ++ "-s" ++ ] ++ if verbose: ++ cmd.append("-v") ++ output, retval = runner.run(cmd) ++ if retval != 0: ++ raise LibraryError( ++ reports.corosync_quorum_get_status_error(output) ++ ) ++ return output ++ ++def remote_client_enable(reporter, node_communicator, node): ++ """ ++ enable qdevice client service (corosync-qdevice) on a remote node ++ """ ++ response = node_communicator.call_node( ++ node, ++ "remote/qdevice_client_enable", ++ None ++ ) ++ if response == "corosync is not enabled, skipping": ++ reporter.process( ++ reports.service_enable_skipped( ++ "corosync-qdevice", ++ "corosync is not enabled", ++ node.label ++ ) ++ ) ++ else: ++ reporter.process( ++ reports.service_enable_success("corosync-qdevice", node.label) ++ ) ++ ++def remote_client_disable(reporter, node_communicator, node): ++ """ ++ disable qdevice client service (corosync-qdevice) on a remote node ++ """ ++ node_communicator.call_node(node, "remote/qdevice_client_disable", None) ++ reporter.process( ++ reports.service_disable_success("corosync-qdevice", node.label) ++ ) ++ ++def remote_client_start(reporter, node_communicator, node): ++ """ ++ start qdevice client service (corosync-qdevice) on a remote node ++ """ ++ response = node_communicator.call_node( ++ node, ++ "remote/qdevice_client_start", ++ None ++ ) ++ if response == "corosync is not running, skipping": ++ reporter.process( ++ reports.service_start_skipped( ++ "corosync-qdevice", ++ "corosync is not running", ++ node.label ++ ) ++ ) ++ else: ++ reporter.process( ++ reports.service_start_success("corosync-qdevice", node.label) ++ ) ++ ++def remote_client_stop(reporter, node_communicator, node): ++ """ ++ stop qdevice client service (corosync-qdevice) on a remote node ++ """ ++ node_communicator.call_node(node, "remote/qdevice_client_stop", None) ++ reporter.process( ++ reports.service_stop_success("corosync-qdevice", node.label) ++ ) +diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py +index 7479257..4054592 100644 +--- a/pcs/lib/corosync/qdevice_net.py ++++ b/pcs/lib/corosync/qdevice_net.py +@@ -5,8 +5,14 @@ from __future__ import ( + unicode_literals, + ) + ++import base64 ++import binascii ++import functools ++import os + import os.path ++import re + import shutil ++import tempfile + + from pcs import settings + from pcs.lib import external, reports +@@ -15,6 +21,18 @@ from pcs.lib.errors import LibraryError + + __model = "net" + __service_name = "corosync-qnetd" ++__qnetd_certutil = os.path.join( ++ settings.corosync_qnet_binaries, ++ "corosync-qnetd-certutil" ++) ++__qnetd_tool = os.path.join( ++ settings.corosync_qnet_binaries, ++ "corosync-qnetd-tool" ++) ++__qdevice_certutil = os.path.join( ++ settings.corosync_binaries, ++ "corosync-qdevice-net-certutil" ++) + + def qdevice_setup(runner): + """ +@@ -24,25 +42,63 @@ def qdevice_setup(runner): + raise LibraryError(reports.qdevice_already_initialized(__model)) + + output, retval = runner.run([ +- os.path.join(settings.corosync_binaries, "corosync-qnetd-certutil"), +- "-i" ++ __qnetd_certutil, "-i" + ]) + if retval != 0: + raise LibraryError( + reports.qdevice_initialization_error(__model, output.rstrip()) + ) + ++def qdevice_initialized(): ++ """ ++ check if qdevice server certificate database has 
been initialized ++ """ ++ return os.path.exists(os.path.join( ++ settings.corosync_qdevice_net_server_certs_dir, ++ "cert8.db" ++ )) ++ + def qdevice_destroy(): + """ + delete qdevice configuration on local host + """ + try: +- shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir) ++ if qdevice_initialized(): ++ shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir) + except EnvironmentError as e: + raise LibraryError( + reports.qdevice_destroy_error(__model, e.strerror) + ) + ++def qdevice_status_generic_text(runner, verbose=False): ++ """ ++ get qdevice runtime status in plain text ++ bool verbose get more detailed output ++ """ ++ cmd = [__qnetd_tool, "-s"] ++ if verbose: ++ cmd.append("-v") ++ output, retval = runner.run(cmd) ++ if retval != 0: ++ raise LibraryError(reports.qdevice_get_status_error(__model, output)) ++ return output ++ ++def qdevice_status_cluster_text(runner, cluster=None, verbose=False): ++ """ ++ get qdevice runtime status in plain text ++ bool verbose get more detailed output ++ string cluster show information only about specified cluster ++ """ ++ cmd = [__qnetd_tool, "-l"] ++ if verbose: ++ cmd.append("-v") ++ if cluster: ++ cmd.extend(["-c", cluster]) ++ output, retval = runner.run(cmd) ++ if retval != 0: ++ raise LibraryError(reports.qdevice_get_status_error(__model, output)) ++ return output ++ + def qdevice_enable(runner): + """ + make qdevice start automatically on boot on local host +@@ -72,3 +128,255 @@ def qdevice_kill(runner): + kill qdevice now on local host + """ + external.kill_services(runner, [__service_name]) ++ ++def qdevice_sign_certificate_request(runner, cert_request, cluster_name): ++ """ ++ sign client certificate request ++ cert_request certificate request data ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ if not qdevice_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ # save the certificate request, corosync tool only works with files ++ tmpfile = _store_to_tmpfile( ++ cert_request, ++ reports.qdevice_certificate_sign_error ++ ) ++ # sign the request ++ output, retval = runner.run([ ++ __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name ++ ]) ++ tmpfile.close() # temp file is deleted on close ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_certificate_sign_error(output.strip()) ++ ) ++ # get signed certificate, corosync tool only works with files ++ return _get_output_certificate( ++ output, ++ reports.qdevice_certificate_sign_error ++ ) ++ ++def client_setup(runner, ca_certificate): ++ """ ++ initialize qdevice client on local host ++ ca_certificate qnetd CA certificate ++ """ ++ client_destroy() ++ # save CA certificate, corosync tool only works with files ++ ca_file_path = os.path.join( ++ settings.corosync_qdevice_net_client_certs_dir, ++ settings.corosync_qdevice_net_client_ca_file_name ++ ) ++ try: ++ if not os.path.exists(ca_file_path): ++ os.makedirs( ++ settings.corosync_qdevice_net_client_certs_dir, ++ mode=0o700 ++ ) ++ with open(ca_file_path, "wb") as ca_file: ++ ca_file.write(ca_certificate) ++ except EnvironmentError as e: ++ raise LibraryError( ++ reports.qdevice_initialization_error(__model, e.strerror) ++ ) ++ # initialize client's certificate storage ++ output, retval = runner.run([ ++ __qdevice_certutil, "-i", "-c", ca_file_path ++ ]) ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_initialization_error(__model, output.rstrip()) ++ ) ++ ++def client_initialized(): ++ """ ++ check if qdevice net 
client certificate database has been initialized ++ """ ++ return os.path.exists(os.path.join( ++ settings.corosync_qdevice_net_client_certs_dir, ++ "cert8.db" ++ )) ++ ++def client_destroy(): ++ """ ++ delete qdevice client config files on local host ++ """ ++ try: ++ if client_initialized(): ++ shutil.rmtree(settings.corosync_qdevice_net_client_certs_dir) ++ except EnvironmentError as e: ++ raise LibraryError( ++ reports.qdevice_destroy_error(__model, e.strerror) ++ ) ++ ++def client_generate_certificate_request(runner, cluster_name): ++ """ ++ create a certificate request which can be signed by qnetd server ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ if not client_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ output, retval = runner.run([ ++ __qdevice_certutil, "-r", "-n", cluster_name ++ ]) ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_initialization_error(__model, output.rstrip()) ++ ) ++ return _get_output_certificate( ++ output, ++ functools.partial(reports.qdevice_initialization_error, __model) ++ ) ++ ++def client_cert_request_to_pk12(runner, cert_request): ++ """ ++ transform signed certificate request to pk12 certificate which can be ++ imported to nodes ++ cert_request signed certificate request ++ """ ++ if not client_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ # save the signed certificate request, corosync tool only works with files ++ tmpfile = _store_to_tmpfile( ++ cert_request, ++ reports.qdevice_certificate_import_error ++ ) ++ # transform it ++ output, retval = runner.run([ ++ __qdevice_certutil, "-M", "-c", tmpfile.name ++ ]) ++ tmpfile.close() # temp file is deleted on close ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_certificate_import_error(output) ++ ) ++ # get resulting pk12, corosync tool only works with files ++ return _get_output_certificate( ++ output, ++ reports.qdevice_certificate_import_error ++ ) ++ ++def client_import_certificate_and_key(runner, pk12_certificate): ++ """ ++ import qdevice client certificate to the local node certificate storage ++ """ ++ if not client_initialized(): ++ raise LibraryError(reports.qdevice_not_initialized(__model)) ++ # save the certificate, corosync tool only works with files ++ tmpfile = _store_to_tmpfile( ++ pk12_certificate, ++ reports.qdevice_certificate_import_error ++ ) ++ output, retval = runner.run([ ++ __qdevice_certutil, "-m", "-c", tmpfile.name ++ ]) ++ tmpfile.close() # temp file is deleted on close ++ if retval != 0: ++ raise LibraryError( ++ reports.qdevice_certificate_import_error(output) ++ ) ++ ++def remote_qdevice_get_ca_certificate(node_communicator, host): ++ """ ++ connect to a qnetd host and get qnetd CA certificate ++ string host address of the qnetd host ++ """ ++ try: ++ return base64.b64decode( ++ node_communicator.call_host( ++ host, ++ "remote/qdevice_net_get_ca_certificate", ++ None ++ ) ++ ) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_response_format(host)) ++ ++def remote_client_setup(node_communicator, node, qnetd_ca_certificate): ++ """ ++ connect to a remote node and initialize qdevice there ++ NodeAddresses node target node ++ qnetd_ca_certificate qnetd CA certificate ++ """ ++ return node_communicator.call_node( ++ node, ++ "remote/qdevice_net_client_init_certificate_storage", ++ external.NodeCommunicator.format_data_dict([ ++ ("ca_certificate", base64.b64encode(qnetd_ca_certificate)), ++ ]) ++ ) ++ ++def 
remote_sign_certificate_request( ++ node_communicator, host, cert_request, cluster_name ++): ++ """ ++ connect to a qdevice host and sign node certificate there ++ string host address of the qnetd host ++ cert_request certificate request to be signed ++ string cluster_name name of the cluster to which qdevice is being added ++ """ ++ try: ++ return base64.b64decode( ++ node_communicator.call_host( ++ host, ++ "remote/qdevice_net_sign_node_certificate", ++ external.NodeCommunicator.format_data_dict([ ++ ("certificate_request", base64.b64encode(cert_request)), ++ ("cluster_name", cluster_name), ++ ]) ++ ) ++ ) ++ except (TypeError, binascii.Error): ++ raise LibraryError(reports.invalid_response_format(host)) ++ ++def remote_client_import_certificate_and_key(node_communicator, node, pk12): ++ """ ++ import pk12 certificate on a remote node ++ NodeAddresses node target node ++ pk12 certificate ++ """ ++ return node_communicator.call_node( ++ node, ++ "remote/qdevice_net_client_import_certificate", ++ external.NodeCommunicator.format_data_dict([ ++ ("certificate", base64.b64encode(pk12)), ++ ]) ++ ) ++ ++def remote_client_destroy(node_communicator, node): ++ """ ++ delete qdevice client config files on a remote node ++ NodeAddresses node target node ++ """ ++ return node_communicator.call_node( ++ node, ++ "remote/qdevice_net_client_destroy", ++ None ++ ) ++ ++def _store_to_tmpfile(data, report_func): ++ try: ++ tmpfile = tempfile.NamedTemporaryFile(mode="wb", suffix=".pcs") ++ tmpfile.write(data) ++ tmpfile.flush() ++ return tmpfile ++ except EnvironmentError as e: ++ raise LibraryError(report_func(e.strerror)) ++ ++def _get_output_certificate(cert_tool_output, report_func): ++ regexp = re.compile(r"^Certificate( request)? stored in (?P.+)$") ++ filename = None ++ for line in cert_tool_output.splitlines(): ++ match = regexp.search(line) ++ if match: ++ filename = match.group("path") ++ if not filename: ++ raise LibraryError(report_func(cert_tool_output)) ++ try: ++ with open(filename, "rb") as cert_file: ++ return cert_file.read() ++ except EnvironmentError as e: ++ raise LibraryError(report_func( ++ "{path}: {error}".format(path=filename, error=e.strerror) ++ )) +diff --git a/pcs/lib/env.py b/pcs/lib/env.py +index 1151891..24e4252 100644 +--- a/pcs/lib/env.py ++++ b/pcs/lib/env.py +@@ -10,6 +10,7 @@ from lxml import etree + from pcs.lib import reports + from pcs.lib.external import ( + is_cman_cluster, ++ is_service_running, + CommandRunner, + NodeCommunicator, + ) +@@ -21,6 +22,7 @@ from pcs.lib.corosync.live import ( + from pcs.lib.nodes_task import ( + distribute_corosync_conf, + check_corosync_offline_on_nodes, ++ qdevice_reload_on_nodes, + ) + from pcs.lib.pacemaker import ( + get_cib, +@@ -152,11 +154,18 @@ class LibraryEnvironment(object): + corosync_conf_data, + skip_offline_nodes + ) +- if not corosync_conf_facade.need_stopped_cluster: ++ if is_service_running(self.cmd_runner(), "corosync"): + reload_corosync_config(self.cmd_runner()) + self.report_processor.process( + reports.corosync_config_reloaded() + ) ++ if corosync_conf_facade.need_qdevice_reload: ++ qdevice_reload_on_nodes( ++ self.node_communicator(), ++ self.report_processor, ++ node_list, ++ skip_offline_nodes ++ ) + else: + self._corosync_conf_data = corosync_conf_data + +diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py +index c0bd3d1..9cab5e9 100644 +--- a/pcs/lib/errors.py ++++ b/pcs/lib/errors.py +@@ -42,4 +42,8 @@ class ReportItem(object): + self.message = self.message_pattern.format(**self.info) + + def 
__repr__(self): +- return self.code+": "+str(self.info) ++ return "{severity} {code}: {info}".format( ++ severity=self.severity, ++ code=self.code, ++ info=self.info ++ ) +diff --git a/pcs/lib/external.py b/pcs/lib/external.py +index 34426f9..c773e5a 100644 +--- a/pcs/lib/external.py ++++ b/pcs/lib/external.py +@@ -49,7 +49,11 @@ except ImportError: + + from pcs.lib import reports + from pcs.lib.errors import LibraryError, ReportItemSeverity +-from pcs.common.tools import simple_cache ++from pcs.common import report_codes ++from pcs.common.tools import ( ++ simple_cache, ++ run_parallel as tools_run_parallel, ++) + from pcs import settings + + +@@ -521,7 +525,7 @@ class NodeCommunicator(object): + # text in response body with HTTP code 400 + # we need to be backward compatible with that + raise NodeCommandUnsuccessfulException( +- host, request, response_data ++ host, request, response_data.rstrip() + ) + elif e.code == 401: + raise NodeAuthenticationException( +@@ -581,3 +585,39 @@ class NodeCommunicator(object): + base64.b64encode(" ".join(self._groups).encode("utf-8")) + )) + return cookies ++ ++ ++def parallel_nodes_communication_helper( ++ func, func_args_kwargs, reporter, skip_offline_nodes=False ++): ++ """ ++ Help running node calls in parallel and handle communication exceptions. ++ Raise LibraryError on any failure. ++ ++ function func function to be run, should be a function calling a node ++ iterable func_args_kwargs list of tuples: (*args, **kwargs) ++ bool skip_offline_nodes do not raise LibraryError if a node is unreachable ++ """ ++ failure_severity = ReportItemSeverity.ERROR ++ failure_forceable = report_codes.SKIP_OFFLINE_NODES ++ if skip_offline_nodes: ++ failure_severity = ReportItemSeverity.WARNING ++ failure_forceable = None ++ report_items = [] ++ ++ def _parallel(*args, **kwargs): ++ try: ++ func(*args, **kwargs) ++ except NodeCommunicationException as e: ++ report_items.append( ++ node_communicator_exception_to_report_item( ++ e, ++ failure_severity, ++ failure_forceable ++ ) ++ ) ++ except LibraryError as e: ++ report_items.extend(e.args) ++ ++ tools_run_parallel(_parallel, func_args_kwargs) ++ reporter.process_list(report_items) +diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py +index b9a61f6..e94d327 100644 +--- a/pcs/lib/nodes_task.py ++++ b/pcs/lib/nodes_task.py +@@ -8,14 +8,19 @@ from __future__ import ( + import json + + from pcs.common import report_codes ++from pcs.common.tools import run_parallel as tools_run_parallel + from pcs.lib import reports +-from pcs.lib.errors import ReportItemSeverity ++from pcs.lib.errors import LibraryError, ReportItemSeverity + from pcs.lib.external import ( + NodeCommunicator, + NodeCommunicationException, + node_communicator_exception_to_report_item, ++ parallel_nodes_communication_helper, ++) ++from pcs.lib.corosync import ( ++ live as corosync_live, ++ qdevice_client, + ) +-from pcs.lib.corosync import live as corosync_live + + + def distribute_corosync_conf( +@@ -33,11 +38,9 @@ def distribute_corosync_conf( + if skip_offline_nodes: + failure_severity = ReportItemSeverity.WARNING + failure_forceable = None +- +- reporter.process(reports.corosync_config_distribution_started()) + report_items = [] +- # TODO use parallel communication +- for node in node_addr_list: ++ ++ def _parallel(node): + try: + corosync_live.set_remote_corosync_conf( + node_communicator, +@@ -62,6 +65,12 @@ def distribute_corosync_conf( + failure_forceable + ) + ) ++ ++ reporter.process(reports.corosync_config_distribution_started()) ++ 
tools_run_parallel( ++ _parallel, ++ [((node, ), {}) for node in node_addr_list] ++ ) + reporter.process_list(report_items) + + def check_corosync_offline_on_nodes( +@@ -77,13 +86,11 @@ def check_corosync_offline_on_nodes( + if skip_offline_nodes: + failure_severity = ReportItemSeverity.WARNING + failure_forceable = None +- +- reporter.process(reports.corosync_not_running_check_started()) + report_items = [] +- # TODO use parallel communication +- for node in node_addr_list: ++ ++ def _parallel(node): + try: +- status = node_communicator.call_node(node, "remote/status", "") ++ status = node_communicator.call_node(node, "remote/status", None) + if not json.loads(status)["corosync"]: + reporter.process( + reports.corosync_not_running_on_node_ok(node.label) +@@ -115,8 +122,48 @@ def check_corosync_offline_on_nodes( + failure_forceable + ) + ) ++ ++ reporter.process(reports.corosync_not_running_check_started()) ++ tools_run_parallel( ++ _parallel, ++ [((node, ), {}) for node in node_addr_list] ++ ) + reporter.process_list(report_items) + ++def qdevice_reload_on_nodes( ++ node_communicator, reporter, node_addr_list, skip_offline_nodes=False ++): ++ """ ++ Reload corosync-qdevice configuration on cluster nodes ++ NodeAddressesList node_addr_list nodes to reload config on ++ bool skip_offline_nodes don't raise an error on node communication errors ++ """ ++ reporter.process(reports.qdevice_client_reload_started()) ++ parallel_params = [ ++ [(reporter, node_communicator, node), {}] ++ for node in node_addr_list ++ ] ++ # catch an exception so we try to start qdevice on nodes where we stopped it ++ report_items = [] ++ try: ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_stop, ++ parallel_params, ++ reporter, ++ skip_offline_nodes ++ ) ++ except LibraryError as e: ++ report_items.extend(e.args) ++ try: ++ parallel_nodes_communication_helper( ++ qdevice_client.remote_client_start, ++ parallel_params, ++ reporter, ++ skip_offline_nodes ++ ) ++ except LibraryError as e: ++ report_items.extend(e.args) ++ reporter.process_list(report_items) + + def node_check_auth(communicator, node): + """ +diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py +index 490b4ff..d8f88cd 100644 +--- a/pcs/lib/reports.py ++++ b/pcs/lib/reports.py +@@ -552,6 +552,19 @@ def corosync_running_on_node_fail(node): + info={"node": node} + ) + ++def corosync_quorum_get_status_error(reason): ++ """ ++ unable to get runtime status of quorum on local node ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR, ++ "Unable to get quorum status: {reason}", ++ info={ ++ "reason": reason, ++ } ++ ) ++ + def corosync_config_reloaded(): + """ + corosync configuration has been reloaded +@@ -614,6 +627,21 @@ def corosync_config_parser_other_error(): + "Unable to parse corosync config" + ) + ++def corosync_options_incompatible_with_qdevice(options): ++ """ ++ cannot set specified corosync options when qdevice is in use ++ iterable options incompatible options names ++ """ ++ return ReportItem.error( ++ report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE, ++ "These options cannot be set when the cluster uses a quorum device: " ++ + "{options_names_str}", ++ info={ ++ "options_names": options, ++ "options_names_str": ", ".join(sorted(options)), ++ } ++ ) ++ + def qdevice_already_defined(): + """ + qdevice is already set up in a cluster, when it was expected not to be +@@ -641,6 +669,15 @@ def qdevice_remove_or_cluster_stop_needed(): + "You need to stop the 
cluster or remove qdevice from cluster to continue" + ) + ++def qdevice_client_reload_started(): ++ """ ++ qdevice client configuration is about to be reloaded on nodes ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CLIENT_RELOAD_STARTED, ++ "Reloading qdevice configuration on nodes..." ++ ) ++ + def qdevice_already_initialized(model): + """ + cannot create qdevice on local host, it has been already created +@@ -654,6 +691,19 @@ def qdevice_already_initialized(model): + } + ) + ++def qdevice_not_initialized(model): ++ """ ++ cannot work with qdevice on local host, it has not been created yet ++ string model qdevice model ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_NOT_INITIALIZED, ++ "Quorum device '{model}' has not been initialized yet", ++ info={ ++ "model": model, ++ } ++ ) ++ + def qdevice_initialization_success(model): + """ + qdevice was successfully initialized on local host +@@ -682,6 +732,72 @@ def qdevice_initialization_error(model, reason): + } + ) + ++def qdevice_certificate_distribution_started(): ++ """ ++ Qdevice certificates are about to be set up on nodes ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED, ++ "Setting up qdevice certificates on nodes..." ++ ) ++ ++def qdevice_certificate_accepted_by_node(node): ++ """ ++ Qdevice certificates have been saved to a node ++ string node node on which certificates have been saved ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE, ++ "{node}: Succeeded", ++ info={"node": node} ++ ) ++ ++def qdevice_certificate_removal_started(): ++ """ ++ Qdevice certificates are about to be removed from nodes ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED, ++ "Removing qdevice certificates from nodes..." 
++ ) ++ ++def qdevice_certificate_removed_from_node(node): ++ """ ++ Qdevice certificates have been removed from a node ++ string node node on which certificates have been deleted ++ """ ++ return ReportItem.info( ++ report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE, ++ "{node}: Succeeded", ++ info={"node": node} ++ ) ++ ++def qdevice_certificate_import_error(reason): ++ """ ++ an error occured when importing qdevice certificate to a node ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR, ++ "Unable to import quorum device certificate: {reason}", ++ info={ ++ "reason": reason, ++ } ++ ) ++ ++def qdevice_certificate_sign_error(reason): ++ """ ++ an error occured when signing qdevice certificate ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR, ++ "Unable to sign quorum device certificate: {reason}", ++ info={ ++ "reason": reason, ++ } ++ ) ++ + def qdevice_destroy_success(model): + """ + qdevice configuration successfully removed from local host +@@ -710,6 +826,21 @@ def qdevice_destroy_error(model, reason): + } + ) + ++def qdevice_get_status_error(model, reason): ++ """ ++ unable to get runtime status of qdevice ++ string model qdevice model ++ string reason an error message ++ """ ++ return ReportItem.error( ++ report_codes.QDEVICE_GET_STATUS_ERROR, ++ "Unable to get status of quorum device '{model}': {reason}", ++ info={ ++ "model": model, ++ "reason": reason, ++ } ++ ) ++ + def cman_unsupported_command(): + """ + requested library command is not available as local cluster is CMAN based +@@ -1022,31 +1153,55 @@ def service_start_started(service): + } + ) + +-def service_start_error(service, reason): ++def service_start_error(service, reason, node=None): + """ + system service start failed + string service service name or description + string reason error message ++ string node node on which service has been requested to start + """ ++ msg = "Unable to start {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_START_ERROR, +- "Unable to start {service}: {reason}", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, ++ "node": node, + } + ) + +-def service_start_success(service): ++def service_start_success(service, node=None): + """ + system service was started successfully + string service service name or description ++ string node node on which service has been requested to start + """ ++ msg = "{service} started" + return ReportItem.info( + report_codes.SERVICE_START_SUCCESS, +- "{service} started", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, ++ "node": node, ++ } ++ ) ++ ++def service_start_skipped(service, reason, node=None): ++ """ ++ starting system service was skipped, no error occured ++ string service service name or description ++ string reason why the start has been skipped ++ string node node on which service has been requested to start ++ """ ++ msg = "not starting {service} - {reason}" ++ return ReportItem.info( ++ report_codes.SERVICE_START_SKIPPED, ++ msg if node is None else "{node}: " + msg, ++ info={ ++ "service": service, ++ "reason": reason, ++ "node": node, + } + ) + +@@ -1063,31 +1218,37 @@ def service_stop_started(service): + } + ) + +-def service_stop_error(service, reason): ++def service_stop_error(service, reason, node=None): + """ + system service stop failed + string service service name or description + string reason error message 
++ string node node on which service has been requested to stop + """ ++ msg = "Unable to stop {service}: {reason}" + return ReportItem.error( + report_codes.SERVICE_STOP_ERROR, +- "Unable to stop {service}: {reason}", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, + "reason": reason, ++ "node": node, + } + ) + +-def service_stop_success(service): ++def service_stop_success(service, node=None): + """ + system service was stopped successfully + string service service name or description ++ string node node on which service has been requested to stop + """ ++ msg = "{service} stopped" + return ReportItem.info( + report_codes.SERVICE_STOP_SUCCESS, +- "{service} stopped", ++ msg if node is None else "{node}: " + msg, + info={ + "service": service, ++ "node": node, + } + ) + +@@ -1121,6 +1282,19 @@ def service_kill_success(services): + } + ) + ++def service_enable_started(service): ++ """ ++ system service is being enabled ++ string service service name or description ++ """ ++ return ReportItem.info( ++ report_codes.SERVICE_ENABLE_STARTED, ++ "Enabling {service}...", ++ info={ ++ "service": service, ++ } ++ ) ++ + def service_enable_error(service, reason, node=None): + """ + system service enable failed +@@ -1143,7 +1317,7 @@ def service_enable_success(service, node=None): + """ + system service was enabled successfully + string service service name or description +- string node node on which service was enabled ++ string node node on which service has been enabled + """ + msg = "{service} enabled" + return ReportItem.info( +@@ -1155,6 +1329,37 @@ def service_enable_success(service, node=None): + } + ) + ++def service_enable_skipped(service, reason, node=None): ++ """ ++ enabling system service was skipped, no error occured ++ string service service name or description ++ string reason why the enabling has been skipped ++ string node node on which service has been requested to enable ++ """ ++ msg = "not enabling {service} - {reason}" ++ return ReportItem.info( ++ report_codes.SERVICE_ENABLE_SKIPPED, ++ msg if node is None else "{node}: " + msg, ++ info={ ++ "service": service, ++ "reason": reason, ++ "node": node, ++ } ++ ) ++ ++def service_disable_started(service): ++ """ ++ system service is being disabled ++ string service service name or description ++ """ ++ return ReportItem.info( ++ report_codes.SERVICE_DISABLE_STARTED, ++ "Disabling {service}...", ++ info={ ++ "service": service, ++ } ++ ) ++ + def service_disable_error(service, reason, node=None): + """ + system service disable failed +@@ -1189,7 +1394,6 @@ def service_disable_success(service, node=None): + } + ) + +- + def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None): + """ + Invalid format of metadata +@@ -1201,7 +1405,6 @@ def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None): + forceable=forceable + ) + +- + def unable_to_get_agent_metadata( + agent, reason, severity=ReportItemSeverity.ERROR, forceable=None + ): +diff --git a/pcs/pcs.8 b/pcs/pcs.8 +index 425b613..a72a9bd 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -518,8 +518,11 @@ rule remove + Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed. + .SS "qdevice" + .TP ++status [\fB\-\-full\fR] [] ++Show runtime status of specified model of quorum device provider. Using \fB\-\-full\fR will give more detailed output. If is specified, only information about the specified cluster will be displayed. 
++.TP + setup model [\fB\-\-enable\fR] [\fB\-\-start\fR] +-Configure specified model of quorum device provider. Quorum device then may be added to clusters by "pcs quorum device add" command. \fB\-\-start\fR will also start the provider. \fB\-\-enable\fR will configure the provider to start on boot. ++Configure specified model of quorum device provider. Quorum device then can be added to clusters by running "pcs quorum device add" command in a cluster. \fB\-\-start\fR will also start the provider. \fB\-\-enable\fR will configure the provider to start on boot. + .TP + destroy + Disable and stop specified model of quorum device provider and delete its configuration files. +@@ -531,7 +534,7 @@ stop + Stop specified model of quorum device provider. + .TP + kill +-Force specified model of quorum device provider to stop (performs kill -9). ++Force specified model of quorum device provider to stop (performs kill \-9). Note that init system (e.g. systemd) can detect that the qdevice is not running and start it again. If you want to stop the qdevice, run "pcs qdevice stop" command. + .TP + enable + Configure specified model of quorum device provider to start on boot. +@@ -543,14 +546,22 @@ Configure specified model of quorum device provider to not start on boot. + config + Show quorum configuration. + .TP +-device add [generic options] model [model options] +-Add quorum device to cluster. Quorum device needs to be created first by "pcs qdevice setup" command. ++status ++Show quorum runtime status. ++.TP ++device add [] model [] ++Add a quorum device to the cluster. Quorum device needs to be created first by "pcs qdevice setup" command. It is not possible to use more than one quorum device in a cluster simultaneously. Generic options, model and model options are all documented in corosync's corosync\-qdevice(8) man page. + .TP + device remove +-Remove quorum device from cluster. ++Remove a quorum device from the cluster. + .TP +-device update [generic options] [model ] +-Add/Change quorum device options. Requires cluster to be stopped. ++device status [\fB\-\-full\fR] ++Show quorum device runtime status. Using \fB\-\-full\fR will give more detailed output. ++.TP ++device update [] [model ] ++Add/Change quorum device options. Generic options and model options are all documented in corosync's corosync\-qdevice(8) man page. Requires the cluster to be stopped. ++ ++WARNING: If you want to change "host" option of qdevice model net, use "pcs quorum device remove" and "pcs quorum device add" commands to set up configuration properly unless old and new host is the same machine. + .TP + unblock [\fB\-\-force\fR] + Cancel waiting for all nodes when establishing quorum. Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless. This command should ONLY be used when nodes which the cluster is waiting for have been confirmed to be powered off and to have no access to shared resources. +@@ -558,7 +569,7 @@ Cancel waiting for all nodes when establishing quorum. Useful in situations whe + .B WARNING: If the nodes are not actually powered off or they do have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed. + .TP + update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[ -
[web UI template hunk: the HTML/ERB markup was stripped during extraction; only a "CLUSTER NAME" column header and the <%= h(c.name) %> cell survive from this fragment.]
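The qdevice patch above routes every per-node operation (enable, start, stop, disable, certificate import) through parallel_nodes_communication_helper(), whose work items are (args, kwargs) pairs. A minimal sketch of that calling convention, assuming a report processor, a NodeCommunicator and a node list already exist (those surrounding names are illustrative, not part of the patch):

    # Sketch only: `reporter`, `communicator` and `nodes` are assumed to be an
    # existing report processor, NodeCommunicator and node address list.
    from pcs.lib.external import parallel_nodes_communication_helper
    from pcs.lib.corosync import qdevice_client

    parallel_nodes_communication_helper(
        # the function is called once per work item, in parallel
        qdevice_client.remote_client_start,
        # one (positional args, keyword args) pair per node, matching the
        # remote_client_start(reporter, node_communicator, node) signature
        [((reporter, communicator, node), {}) for node in nodes],
        reporter,
        # with skip_offline_nodes, unreachable nodes produce warnings instead
        # of a hard LibraryError
        skip_offline_nodes=True,
    )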
--- -1.9.1 - diff --git a/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch b/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch deleted file mode 100644 index 7e34f88..0000000 --- a/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch +++ /dev/null @@ -1,735 +0,0 @@ -From 85ea8bf4630bd3760ab935c24c7b78cdd255f55b Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Wed, 26 Aug 2015 10:55:57 +0200 -Subject: [PATCH] fix tree view of resources in web UI - ---- - pcsd/cluster_entity.rb | 15 +- - pcsd/pcs.rb | 30 ++- - pcsd/public/js/nodes-ember.js | 34 +++- - pcsd/remote.rb | 12 +- - pcsd/views/nodes.erb | 457 +++++++++++++++++++++--------------------- - 5 files changed, 284 insertions(+), 264 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 182969f..b291937 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -895,7 +895,7 @@ module ClusterEntity - class Node < JSONable - attr_accessor :id, :error_list, :warning_list, :status, :quorum, :uptime, - :name, :corosync, :pacemaker, :cman, :corosync_enabled, -- :pacemaker_enabled, :pcsd_enabled, :attr, :fence_levels -+ :pacemaker_enabled, :pcsd_enabled - - def initialize - @id = nil -@@ -911,8 +911,6 @@ module ClusterEntity - @corosync_enabled = false - @pacemaker_enabled = false - @pcsd_enabled = false -- @attr = ClusterEntity::NvSet.new -- @fence_levels = {} - end - - def self.load_current_node(session, crm_dom=nil) -@@ -923,7 +921,6 @@ module ClusterEntity - node.pacemaker_enabled = pacemaker_enabled? - node.cman = cman_running? - node.pcsd_enabled = pcsd_enabled? -- node.fence_levels = get_fence_levels(session) - - node_online = (node.corosync and node.pacemaker) - node.status = node_online ? 'online' : 'offline' -@@ -939,16 +936,6 @@ module ClusterEntity - node.status = 'online' - end - node.quorum = !!crm_dom.elements['//current_dc[@with_quorum="true"]'] -- -- node_name = get_current_node_name() -- all_nodes_attr = get_node_attributes(session) -- if all_nodes_attr[node_name] -- all_nodes_attr[node_name].each { |pair| -- node.attr << ClusterEntity::NvPair.new( -- nil, pair[:key], pair[:value] -- ) -- } -- end - else - node.status = 'offline' - end -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 37f6b83..1fe9b99 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -1624,8 +1624,11 @@ def get_node_status(session, cib_dom) - :need_ring1_address => need_ring1_address?, - :is_cman_with_udpu_transport => is_cman_with_udpu_transport?, - :acls => get_acls(session), -- :username => session[:username] -+ :username => session[:username], -+ :fence_levels => get_fence_levels(session), -+ :node_attr => node_attrs_to_v2(get_node_attributes(session)) - } -+ - nodes = get_nodes_status() - - known_nodes = [] -@@ -1742,14 +1745,31 @@ def get_cib_dom(session) - return nil - end - -+def node_attrs_to_v2(node_attrs) -+ all_nodes_attr = {} -+ node_attrs.each { |node, attrs| -+ all_nodes_attr[node] = [] -+ attrs.each { |attr| -+ all_nodes_attr[node] << { -+ :id => nil, -+ :name => attr[:key], -+ :value => attr[:value] -+ } -+ } -+ } -+ return all_nodes_attr -+end -+ - def status_v1_to_v2(status) - new_status = status.select { |k,_| - [:cluster_name, :username, :is_cman_with_udpu_transport, - :need_ring1_address, :cluster_settings, :constraints, :groups, - :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby, -- :pacemaker_offline, :acls -+ :pacemaker_offline, :acls, :fence_levels - ].include?(k) - } -+ new_status[:node_attr] = 
node_attrs_to_v2(status[:node_attr]) -+ - resources = ClusterEntity::make_resources_tree( - ClusterEntity::get_primitives_from_status_v1(status[:resources]) - ) -@@ -1764,15 +1784,9 @@ def status_v1_to_v2(status) - ].include?(k) - } - -- node_attr = ClusterEntity::NvSet.new -- status[:node_attr].each { |k,v| -- node_attr << ClusterEntity::NvPair.new(nil, k, v) -- } - new_status[:node].update( - { - :id => status[:node_id], -- :attr => node_attr.to_status, -- :fence_levels => status[:fence_levels], - :quorum => nil, - :warning_list => [], - :error_list => [], -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 46e34fa..1f60adc 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -170,7 +170,7 @@ Pcs = Ember.Application.createWithMixins({ - tree_view_onclick(self.get('cur_resource').get('id'), true); - if (!fence_change && self.get('cur_fence')) - tree_view_select(self.get('cur_fence').get('id')); -- if (!resource_change && self.get('cur_fence')) -+ if (!resource_change && self.get('cur_resource')) - tree_view_select(self.get('cur_resource').get('id')); - Pcs.selectedNodeController.reset(); - setup_node_links(); -@@ -932,6 +932,9 @@ Pcs.Setting = Ember.Object.extend({ - Pcs.Clusternode = Ember.Object.extend({ - name: null, - status: null, -+ status_unknown: function() { -+ return this.get('status') == "unknown"; -+ }.property("status"), - status_val: function() { - if (this.warnings && this.warnings.length) - return get_status_value("warning"); -@@ -1013,6 +1016,10 @@ Pcs.Clusternode = Ember.Object.extend({ - return "color:red"; - } - }.property("up","pacemaker_standby"), -+ pacemaker_standby: null, -+ corosync_enabled: null, -+ pacemaker_enabled: null, -+ pcsd_enabled: null, - standby_style: function () { - if (this.pacemaker_standby) - return "display: none;"; -@@ -1043,7 +1050,12 @@ Pcs.Clusternode = Ember.Object.extend({ - else - return "Disabled"; - }.property("pcsd_enabled"), -- location_constraints: null -+ location_constraints: null, -+ node_attrs: [], -+ fence_levels: [], -+ pcsd: null, -+ corosync_daemon: null, -+ pacemaker_daemon: null, - }); - - Pcs.Aclrole = Ember.Object.extend({ -@@ -1509,8 +1521,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - cur_node: null, - cur_node_attr: function () { - var nc = this; -- if (nc.cur_node && "node_attrs" in nc.cur_node) { -- return nc.cur_node.node_attrs; -+ if (nc.get('cur_node')) { -+ return nc.get('cur_node').get('node_attrs'); - } - return []; - }.property("cur_node", "content.@each.node_attrs"), -@@ -1599,7 +1611,7 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - pacemaker_standby = false; - } - -- if (node_obj["noresponse"] == true) { -+ if (node_obj["status"] == 'unknown') { - pcsd_daemon = false - } else { - pcsd_daemon = true -@@ -1618,9 +1630,9 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - up_status = false; - } - -- var node_attr = {}; -- if (node_obj["attr"]) { -- node_attr = node_obj["attr"]; -+ var node_attr = []; -+ if (data["node_attr"] && data["node_attr"][node_id]) { -+ node_attr = data["node_attr"][node_id]; - } - - found = false; -@@ -1646,7 +1658,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - node.set("uptime", node_obj["uptime"]); - node.set("node_id", node_obj["id"]); - node.set("node_attrs", node_attr); -- node.set("fence_levels", node_obj["fence_levels"]); -+ node.set("fence_levels", data["fence_levels"]); -+ node.set("status", node_obj["status"]); - } - }); - -@@ 
-1670,7 +1683,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ - uptime: node_obj["uptime"], - node_id: node_obj["id"], - node_attrs: node_attr, -- fence_levels: node_obj["fence_levels"] -+ fence_levels: data["fence_levels"], -+ status: node_obj["status"] - }); - } - var pathname = window.location.pathname.split('/'); -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 22af38a..a40c1c7 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -1014,8 +1014,14 @@ def node_status(params, request, session) - status[:cluster_settings] - - node_attr = {} -- node.attr.each { |v| -- node_attr[v.name.to_sym] = v.value -+ status[:node_attr].each { |node, attrs| -+ node_attr[node] = [] -+ attrs.each { |attr| -+ node_attr[node] << { -+ :key => attr[:name], -+ :value => attr[:value] -+ } -+ } - } - - old_status = { -@@ -1038,7 +1044,7 @@ def node_status(params, request, session) - :cluster_settings => cluster_settings, - :node_id => node.id, - :node_attr => node_attr, -- :fence_levels => node.fence_levels, -+ :fence_levels => status[:fence_levels], - :need_ring1_address => status[:need_ring1_address], - :is_cman_with_udpu_transport => status[:is_cman_with_udpu_transport], - :acls => status[:acls], -diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb -index b8ecf6d..19bba62 100644 ---- a/pcsd/views/nodes.erb -+++ b/pcsd/views/nodes.erb -@@ -40,242 +40,241 @@ -
[pcsd/views/nodes.erb hunk body: the HTML/ERB markup was stripped during extraction and cannot be reconstructed here. The surviving fragments show the node detail view being rebuilt: the "Edit Node" header and node name, Pacemaker/Corosync connection status, the Cluster Daemons table (pacemaker, corosync, pcsd), Running Resources, Resource Location Preferences, Node Attributes (now iterated with {{#each attr in Pcs.nodesController.cur_node_attr}}), and Fence Levels sections.]
-+ -+ - <%= erb :_configure %> - <%= erb :_acls %> - <%= erb :_wizards %> --- -1.9.1 - diff --git a/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch b/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch deleted file mode 100644 index 9398777..0000000 --- a/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch +++ /dev/null @@ -1,173 +0,0 @@ -From 032a2571656c646f17bb3453b6a7d4883241ad46 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Tue, 1 Sep 2015 12:06:20 +0200 -Subject: [PATCH] web UI: prevents running update multiple times at once - ---- - pcsd/public/js/nodes-ember.js | 106 ++++++++++++++++++++++++++++++++++++------ - 1 file changed, 91 insertions(+), 15 deletions(-) - -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 172c00a..d2f85bd 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -49,22 +49,25 @@ Pcs = Ember.Application.createWithMixins({ - }); - return retArray; - }, -- update_timeout: null, -- update: function(first_run) { -+ updater: null, -+ -+ update: function() { -+ Pcs.get('updater').update(); -+ }, -+ -+ _update: function(first_run) { - if (window.location.pathname.lastIndexOf('/manage', 0) !== 0) { - return; - } -- clearTimeout(Pcs.get('update_timeout')); -- Pcs.set('update_timeout', null); -+ if (first_run) { -+ show_loading_screen(); -+ } - var self = Pcs; - var cluster_name = self.cluster_name; - if (cluster_name == null) { - if (location.pathname.indexOf("/manage") != 0) { - return; - } -- if (first_run) { -- show_loading_screen(); -- } - Ember.debug("Empty Cluster Name"); - $.ajax({ - url: "/clusters_overview", -@@ -77,8 +80,6 @@ Pcs = Ember.Application.createWithMixins({ - }); - if (data["not_current_data"]) { - self.update(); -- } else { -- Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - } - hide_loading_screen(); - }, -@@ -93,15 +94,14 @@ Pcs = Ember.Application.createWithMixins({ - console.log("Error: Unable to parse json for clusters_overview"); - } - } -- Pcs.set('update_timeout', window.setTimeout(self.update,20000)); - hide_loading_screen(); -+ }, -+ complete: function() { -+ Pcs.get('updater').update_finished(); - } - }); - return; - } -- if (first_run) { -- show_loading_screen(); -- } - $.ajax({ - url: "cluster_status", - dataType: "json", -@@ -191,12 +191,84 @@ Pcs = Ember.Application.createWithMixins({ - }, - complete: function() { - hide_loading_screen(); -- Pcs.update_timeout = window.setTimeout(Pcs.update,20000); -+ Pcs.get('updater').update_finished(); - } - }); - } - }); - -+Pcs.Updater = Ember.Object.extend({ -+ timeout: 20000, -+ first_run: true, -+ async: true, -+ autostart: true, -+ started: false, -+ in_progress: false, -+ waiting: false, -+ update_function: null, -+ update_target: null, -+ timer: null, -+ -+ start: function() { -+ this.set('started', true); -+ this.update(); -+ }, -+ -+ stop: function() { -+ this.set('started', false); -+ this.cancel_timer(); -+ }, -+ -+ cancel_timer: function() { -+ var self = this; -+ var timer = self.get('timer'); -+ if (timer) { -+ self.set('timer', null); -+ Ember.run.cancel(timer); -+ } -+ }, -+ -+ update: function() { -+ var self = this; -+ if (!self.get('update_function')) { -+ console.log('No update_function defined!'); -+ return; -+ } -+ self.cancel_timer(); -+ self.set('waiting', false); -+ if (self.get('in_progress')) { -+ self.set('waiting', true); -+ } else { -+ self.set('in_progress', true); -+ 
self.get('update_function').apply(self.get('update_target'), [self.get('first_run')]); -+ self.set('first_run', false); -+ if (!self.get('async')) { -+ self.update_finished(); -+ } -+ } -+ }, -+ -+ update_finished: function() { -+ var self = this; -+ if (self.get('waiting')) { -+ Ember.run.next(self, self.update); -+ } else if (self.get('started')) { -+ self.set('timer', Ember.run.later(self, self.update, self.get('timeout'))); -+ } -+ self.set('in_progress', false); -+ }, -+ -+ init: function() { -+ var self = this; -+ if (!self.get('update_target')) { -+ self.set('update_target', self); -+ } -+ if (self.get('autostart')) { -+ self.start(); -+ } -+ } -+}); -+ - Pcs.resourcesContainer = Ember.Object.create({ - resource_map: {}, - top_level_resource_map: {}, -@@ -1742,4 +1814,8 @@ function myUpdate() { - // window.setTimeout(myUpdate,4000); - } - --Pcs.update(true); -+Pcs.set('updater', Pcs.Updater.create({ -+ timeout: 20000, -+ update_function: Pcs._update, -+ update_target: Pcs -+})); --- -1.9.1 - diff --git a/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch b/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch deleted file mode 100644 index 34ec1a9..0000000 --- a/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 7e92db5789ad09f0e1184691ba69fb087402f24c Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Wed, 2 Sep 2015 11:16:14 +0200 -Subject: [PATCH] fix constraints removing in web UI - ---- - pcsd/public/js/nodes-ember.js | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index d2f85bd..0943c65 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -562,9 +562,12 @@ Pcs.resourcesContainer = Ember.Object.create({ - self.set('constraints', constraints); - var resource_map = self.get('resource_map'); - $.each(constraints, function(const_type, cons) { -- $.each(cons, function(resource_id, cons_list) { -- if (resource_id in resource_map) -- resource_map[resource_id].set(const_type, cons_list); -+ $.each(resource_map, function(resource_id, resource_obj) { -+ if (resource_id in cons) { -+ resource_obj.set(const_type, cons[resource_id]); -+ } else { -+ resource_obj.set(const_type, []); -+ } - }); - }); - } --- -1.9.1 - diff --git a/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch b/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch deleted file mode 100644 index d702fe2..0000000 --- a/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 41e2d3e4f5ae0331d7984612485b3bbb84d41304 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Wed, 2 Sep 2015 12:39:06 +0200 -Subject: [PATCH] remove removing constriants from client-side (javascript) - -All changes are displayed after update. 
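
A rough Python sketch of the coalescing-refresh pattern this change relies on (the Pcs.Updater object introduced by the bz1189857-03 patch above). The class and all names below are illustrative only and are not part of pcs or pcsd; timer and Ember-specific handling are omitted:

# Illustrative sketch only (hypothetical names, not pcsd code): while one
# refresh is running, further requests just set a flag, and a single
# follow-up refresh runs once the current one finishes. The UI can then
# simply wait for the next update instead of patching its own state.
class CoalescingUpdater(object):
    def __init__(self, refresh):
        self.refresh = refresh        # callable that reloads state from the server
        self.in_progress = False
        self.waiting = False

    def request_update(self):
        if self.in_progress:
            self.waiting = True       # remember that another refresh was asked for
        else:
            self.in_progress = True
            self.refresh()

    def update_finished(self):
        # called when the asynchronous refresh completes
        self.in_progress = False
        if self.waiting:
            self.waiting = False
            self.request_update()     # run exactly one queued refresh

if __name__ == "__main__":
    updater = CoalescingUpdater(lambda: print("reload cluster status"))
    updater.request_update()          # starts a refresh
    updater.request_update()          # coalesced into the pending one
    updater.update_finished()         # triggers the single queued refresh

With that in place, removing a constraint only needs to trigger the next poll; the refreshed server response redraws the constraint tables, which is why the client-side bookkeeping below can be dropped.
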
---- - pcsd/public/js/nodes-ember.js | 24 ------------------------ - pcsd/public/js/pcsd.js | 6 ------ - 2 files changed, 30 deletions(-) - -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index 0943c65..5fec386 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -390,30 +390,6 @@ Pcs.resourcesContainer = Ember.Object.create({ - }; - }, - -- remove_constraint: function(constraint_id) { -- $.each(this.get('resource_map'), function(key, resource) { -- $.each( -- [ -- "location_constraints", -- "ordering_constraints", -- "ordering_set_constraints", -- "colocation_constraints" -- ], -- function(_, constraint_type) { -- if (resource.get(constraint_type)) { -- resource.set( -- constraint_type, -- $.grep( -- resource.get(constraint_type), -- function(value2, key) { return value2.id != constraint_id; } -- ) -- ); -- } -- } -- ); -- }); -- }, -- - update_meta_attr: function(resource_id, attr, value) { - value = typeof value !== 'undefined' ? value.trim() : ""; - var data = { -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 879b533..197cdd1 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -1595,9 +1595,6 @@ function remove_constraint(id) { - url: get_cluster_remote_url() + 'remove_constraint_remote', - data: {"constraint_id": id}, - timeout: pcs_timeout, -- success: function (data) { -- Pcs.resourcesContainer.remove_constraint(id); -- }, - error: function (xhr, status, error) { - alert( - "Error removing constraint " -@@ -1617,9 +1614,6 @@ function remove_constraint_rule(id) { - url: get_cluster_remote_url() + 'remove_constraint_rule_remote', - data: {"rule_id": id}, - timeout: pcs_timeout, -- success: function (data) { -- Pcs.resourcesContainer.remove_constraint(id); -- }, - error: function (xhr, status, error) { - alert( - "Error removing constraint rule " --- -1.9.1 - diff --git a/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch b/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch deleted file mode 100644 index eb0098a..0000000 --- a/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch +++ /dev/null @@ -1,228 +0,0 @@ -From 7c12321d187ce5919ea5e443612321b404be8cab Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Tue, 15 Sep 2015 11:03:59 +0200 -Subject: [PATCH] web UI: fixes in nodes, resources, fence devices - -- fix creating disabled resource -- add sorting for cluster list, resource list and fence device list -- hide resource (fence device) details when there is no resource (fence device) -- in resource list color of resource name depends on its status -- fix group selector -- disabled autocorrect for ordering set constraints -- fix status detection of master/slave resources ---- - pcsd/cluster_entity.rb | 2 +- - pcsd/pcsd.rb | 2 +- - pcsd/public/css/style.css | 8 +++++++ - pcsd/public/js/nodes-ember.js | 56 +++++++++++++++++++++++++++++++------------ - pcsd/public/js/pcsd.js | 5 +++- - pcsd/views/main.erb | 4 +++- - 6 files changed, 58 insertions(+), 19 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index b5d2719..8f29a40 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -914,7 +914,7 @@ module ClusterEntity - end - @masters, @slaves = get_masters_slaves(primitive_list) - if (@masters.empty? 
and -- @member.status != ClusterEntity::ResourceStatus.new(:disabled) -+ @member.status == ClusterEntity::ResourceStatus.new(:running) - ) - @status = ClusterEntity::ResourceStatus.new(:partially_running) - end -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index 9a07ee8..b7c2a49 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -178,7 +178,7 @@ helpers do - param_line << "#{myparam}=#{val}" - end - if param == "disabled" -- meta_options << "meta target-role=Stopped" -+ meta_options << 'meta' << 'target-role=Stopped' - end - } - return param_line + meta_options -diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css -index a3f6638..1c003bd 100644 ---- a/pcsd/public/css/style.css -+++ b/pcsd/public/css/style.css -@@ -778,3 +778,11 @@ li.menuheader { - .issue_table { - margin-top: 1.5em; - } -+ -+.status-error { -+ color: red; -+} -+ -+.status-warning { -+ color: #ff6600; -+} -diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js -index bbeed55..1e00a94 100644 ---- a/pcsd/public/js/nodes-ember.js -+++ b/pcsd/public/js/nodes-ember.js -@@ -147,8 +147,10 @@ Pcs = Ember.Application.createWithMixins({ - } else { - if (self.get('fence_list').length > 0) { - cur_fence = self.get('fence_list')[0]; -- fence_change = true; -+ } else { -+ cur_fence = null; - } -+ fence_change = true; - } - - if (cur_resource && cur_resource.get('id') in resource_map) { -@@ -158,22 +160,28 @@ Pcs = Ember.Application.createWithMixins({ - } else { - if (self.get('resource_list').length > 0) { - cur_resource = self.get('resource_list')[0]; -- resource_change = true; -+ } else { -+ cur_resource = null; - } -+ resource_change = true; - } - - self.set('cur_fence', cur_fence); - self.set('cur_resource', cur_resource); - - Ember.run.scheduleOnce('afterRender', Pcs, function () { -- if (fence_change) -- tree_view_onclick(self.get('cur_fence').get('id'), true); -- if (resource_change) -- tree_view_onclick(self.get('cur_resource').get('id'), true); -- if (!fence_change && self.get('cur_fence')) -- tree_view_select(self.get('cur_fence').get('id')); -- if (!resource_change && self.get('cur_resource')) -- tree_view_select(self.get('cur_resource').get('id')); -+ if (self.get('cur_fence')) { -+ if (fence_change) -+ tree_view_onclick(self.get('cur_fence').get('id'), true); -+ else -+ tree_view_select(self.get('cur_fence').get('id')); -+ } -+ if (self.get('cur_resource')) { -+ if (resource_change) -+ tree_view_onclick(self.get('cur_resource').get('id'), true); -+ else -+ tree_view_select(self.get('cur_resource').get('id')); -+ } - Pcs.selectedNodeController.reset(); - disable_checkbox_clicks(); - }); -@@ -546,6 +554,11 @@ Pcs.resourcesContainer = Ember.Object.create({ - } - }); - }); -+ $.each(resource_map, function(resource_id, resource_obj) { -+ resource_obj.set('group_list', self.get('group_list')); -+ }); -+ self.set('resource_list', Ember.copy(self.get('resource_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))})); -+ self.set('fence_list', Ember.copy(self.get('fence_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))})); - } - }); - -@@ -565,6 +578,7 @@ Pcs.ResourceObj = Ember.Object.extend({ - disabled: false, - error_list: [], - warning_list: [], -+ group_list: [], - get_group_id: function() { - var self = this; - var p = self.get('parent'); -@@ -577,7 +591,7 @@ Pcs.ResourceObj = Ember.Object.extend({ - var self = this; - var cur_group = self.get('get_group_id'); - var html = ' - - {{{node.status_icon}}} -- {{node._id}} -+ -+ {{node._id}} -+ - 
{{node.resource_type}} - - --- -1.9.1 - diff --git a/SOURCES/bz1189857-07-web-UI-fixes.patch b/SOURCES/bz1189857-07-web-UI-fixes.patch deleted file mode 100644 index 9df183e..0000000 --- a/SOURCES/bz1189857-07-web-UI-fixes.patch +++ /dev/null @@ -1,99 +0,0 @@ -From c601e0f7e93db3e136eb9080fc2d4f4a0c999360 Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Mon, 21 Sep 2015 17:53:51 +0200 -Subject: [PATCH] web UI fixes - -- fix loading resource optional argument form -- fix master/slave resource status from old pcsd -- fix status of failed resource ---- - pcsd/cluster_entity.rb | 10 ++++++---- - pcsd/public/js/pcsd.js | 4 +++- - pcsd/views/main.erb | 2 ++ - 3 files changed, 11 insertions(+), 5 deletions(-) - -diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb -index 8f29a40..c746544 100644 ---- a/pcsd/cluster_entity.rb -+++ b/pcsd/cluster_entity.rb -@@ -191,6 +191,7 @@ module ClusterEntity - mi = ClusterEntity::Clone.new - else - mi = ClusterEntity::MasterSlave.new -+ mi.masters_unknown = true - end - mi.id = mi_id - mi.meta_attr = ClusterEntity::get_meta_attr_from_status_v1( -@@ -539,7 +540,7 @@ module ClusterEntity - status = ClusterEntity::ResourceStatus.new(:disabled) - elsif running > 0 - status = ClusterEntity::ResourceStatus.new(:running) -- elsif failed > 0 -+ elsif failed > 0 or @error_list.length > 0 - status = ClusterEntity::ResourceStatus.new(:failed) - else - status = ClusterEntity::ResourceStatus.new(:blocked) -@@ -854,10 +855,11 @@ module ClusterEntity - - - class MasterSlave < MultiInstance -- attr_accessor :masters, :slaves -+ attr_accessor :masters, :slaves, :masters_unknown - - def initialize(master_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, operations=nil) - super(master_cib_element, crm_dom, rsc_status, parent, operations) -+ @masters_unknown = false - @class_type = 'master' - @masters = [] - @slaves = [] -@@ -869,7 +871,7 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if (@masters.empty? and -+ if (@masters.empty? and !@masters_unknown and - @status != ClusterEntity::ResourceStatus.new(:disabled) - ) - @warning_list << { -@@ -913,7 +915,7 @@ module ClusterEntity - primitive_list = @member.members - end - @masters, @slaves = get_masters_slaves(primitive_list) -- if (@masters.empty? and -+ if (@masters.empty? and !@masters_unknown and - @member.status == ClusterEntity::ResourceStatus.new(:running) - ) - @status = ClusterEntity::ResourceStatus.new(:partially_running) -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index 23fd316..04bee0f 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -2010,7 +2010,9 @@ function tree_view_onclick(resource_id, auto) { - - tree_view_select(resource_id); - -- load_agent_form(resource_id, resource_obj.get('stonith')); -+ Ember.run.next(Pcs, function() { -+ load_agent_form(resource_id, resource_obj.get('stonith')); -+ }); - } - - function tree_view_select(element_id) { -diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb -index e7e611d..b7260ad 100644 ---- a/pcsd/views/main.erb -+++ b/pcsd/views/main.erb -@@ -277,8 +277,10 @@ - {{#if stonith}} -
- {{else}}
-+ {{#if resource.is_primitive}}
-
- {{/if}} -+ {{/if}} - {{else}} - {{#if stonith}} - NO FENCE DEVICE IN CLUSTER --- -1.9.1 - diff --git a/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch b/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch deleted file mode 100644 index 69de2d1..0000000 --- a/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 906780d7d61fef803c5e1adfa9d156e07e67c26a Mon Sep 17 00:00:00 2001 -From: Ondrej Mular -Date: Tue, 15 Sep 2015 11:14:04 +0200 -Subject: [PATCH] web UI: allows spaces in optional arguments when creating new - resource - ---- - pcsd/public/js/pcsd.js | 7 +++---- - 1 file changed, 3 insertions(+), 4 deletions(-) - -diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js -index cddf14e..84db292 100644 ---- a/pcsd/public/js/pcsd.js -+++ b/pcsd/public/js/pcsd.js -@@ -284,15 +284,14 @@ function disable_spaces(item) { - } - - function load_resource_form(item, ra, stonith) { -- data = { "new": true, resourcename: ra}; -+ var data = { new: true, resourcename: ra}; -+ var command; - if (!stonith) - command = "resource_metadata"; - else - command = "fence_device_metadata"; - -- item.load(get_cluster_remote_url() + command, data, function() { -- disable_spaces(this); -- }); -+ item.load(get_cluster_remote_url() + command, data); - } - - function update_resource_form_groups(form, group_list) { --- -1.9.1 - diff --git a/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch b/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch new file mode 100644 index 0000000..b3c10e4 --- /dev/null +++ b/SOURCES/bz1225423-01-allow-to-remove-a-dead-node-from-a-cluster.patch @@ -0,0 +1,122 @@ +From 2a080e5986331989a3164a35129e576641b2cca5 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 19 Jul 2016 16:42:44 +0200 +Subject: [PATCH 1/2] allow to remove a dead node from a cluster + +--- + pcs/cluster.py | 41 +++++++++++++++++++++++++++-------------- + 1 file changed, 27 insertions(+), 14 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index baa0f44..7a8615d 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -1076,7 +1076,7 @@ def disable_cluster_nodes(nodes): + if len(error_list) > 0: + utils.err("unable to disable all nodes\n" + "\n".join(error_list)) + +-def destroy_cluster(argv): ++def destroy_cluster(argv, keep_going=False): + if len(argv) > 0: + # stop pacemaker and resources while cluster is still quorate + nodes = argv +@@ -1085,7 +1085,14 @@ def destroy_cluster(argv): + # destroy will stop any remaining cluster daemons + error_list = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True) + if error_list: +- utils.err("unable to destroy cluster\n" + "\n".join(error_list)) ++ if keep_going: ++ print( ++ "Warning: unable to destroy cluster\n" ++ + ++ "\n".join(error_list) ++ ) ++ else: ++ utils.err("unable to destroy cluster\n" + "\n".join(error_list)) + + def stop_cluster(argv): + if len(argv) > 0: +@@ -1347,19 +1354,25 @@ def cluster_node(argv): + + node = argv[1] + node0, node1 = utils.parse_multiring_node(node) +- + if not node0: + utils.err("missing ring 0 address of the node") +- status,output = utils.checkAuthorization(node0) +- if status == 2: +- utils.err("pcsd is not running on %s" % node0) +- elif status == 3: +- utils.err( +- "%s is not yet authenticated (try pcs cluster auth %s)" +- % (node0, node0) +- ) +- elif status != 0: +- utils.err(output) ++ ++ # allow to continue if removing a node with --force ++ if 
add_node or "--force" not in utils.pcs_options: ++ status, output = utils.checkAuthorization(node0) ++ if status != 0: ++ if status == 2: ++ msg = "pcsd is not running on {0}".format(node0) ++ elif status == 3: ++ msg = ( ++ "{node} is not yet authenticated " ++ + " (try pcs cluster auth {node})" ++ ).format(node=node0) ++ else: ++ msg = output ++ if not add_node: ++ msg += ", use --force to override" ++ utils.err(msg) + + if add_node == True: + wait = False +@@ -1540,7 +1553,7 @@ def cluster_node(argv): + + nodesRemoved = False + c_nodes = utils.getNodesFromCorosyncConf() +- destroy_cluster([node0]) ++ destroy_cluster([node0], keep_going=("--force" in utils.pcs_options)) + for my_node in c_nodes: + if my_node == node0: + continue +-- +1.8.3.1 + + +From c48716233ace08c16e7e4b66075aebeca9366321 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Wed, 20 Jul 2016 10:01:13 +0200 +Subject: [PATCH 2/2] gui: allow to remove a dead node from a cluster + +--- + pcsd/remote.rb | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 25fb74d..05a6d03 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -837,8 +837,15 @@ def remote_remove_nodes(params, request, auth_user) + stdout, stderr, retval = run_cmd( + auth_user, PCS, "cluster", "stop", *stop_params + ) +- if retval != 0 +- return [400, stderr.join] ++ if retval != 0 and not params['force'] ++ # If forced, keep going even if unable to stop all nodes (they may be dead). ++ # Add info this error is forceable if pcs did not do it (e.g. when unable ++ # to connect to some nodes). ++ message = stderr.join ++ if not message.include?(', use --force to override') ++ message += ', use --force to override' ++ end ++ return [400, message] + end + + node_list.each {|node| +-- +1.8.3.1 + diff --git a/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch b/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch new file mode 100644 index 0000000..6f3c99a --- /dev/null +++ b/SOURCES/bz1231858-01-web-UI-fix-occasional-issue-with-not-showing-optiona.patch @@ -0,0 +1,84 @@ +From 4fbf6a24492b0ac61be7822208275f1837165ae2 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Fri, 22 Jul 2016 13:37:28 +0200 +Subject: [PATCH] web UI: fix occasional issue with not showing optional + arguments of resources + +--- + pcsd/public/js/nodes-ember.js | 12 ++++-------- + pcsd/public/js/pcsd.js | 17 +++++++---------- + 2 files changed, 11 insertions(+), 18 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index cb62806..2b43559 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -210,20 +210,16 @@ Pcs = Ember.Application.createWithMixins({ + Ember.run.scheduleOnce('afterRender', Pcs, function () { + if (self.get('cur_fence')) { + if (fence_change) { +- if (first_run) { +- update_instance_attributes(self.get('cur_fence').get('id')); +- } +- tree_view_onclick(self.get('cur_fence').get('id'), true); ++ tree_view_onclick(self.get('cur_fence').get('id'), first_run); + } else { + tree_view_select(self.get('cur_fence').get('id')); + } + } + if (self.get('cur_resource')) { + if (resource_change) { +- if (first_run) { +- update_instance_attributes(self.get('cur_resource').get('id')); +- } +- tree_view_onclick(self.get('cur_resource').get('id'), true); ++ tree_view_onclick( ++ self.get('cur_resource').get('id'), first_run ++ ); + } else { + tree_view_select(self.get('cur_resource').get('id')); + } 
+diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index e763482..1ec0f1c 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -2108,29 +2108,26 @@ function update_instance_attributes(resource_id) { + }, res_obj.get("stonith")); + } + +-function tree_view_onclick(resource_id, auto) { +- auto = typeof auto !== 'undefined' ? auto : false; ++function tree_view_onclick(resource_id, first_run) { ++ first_run = typeof first_run !== 'undefined' ? first_run : false; + var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id); + if (!resource_obj) { + console.log("Resource " + resource_id + "not found."); + return; + } + if (resource_obj.get('stonith')) { +- Pcs.resourcesContainer.set('cur_fence', resource_obj); +- if (!auto) { ++ if (!first_run) { + window.location.hash = "/fencedevices/" + resource_id; +- update_instance_attributes(resource_id); + } ++ Pcs.resourcesContainer.set('cur_fence', resource_obj); + } else { +- Pcs.resourcesContainer.set('cur_resource', resource_obj); +- +- if (!auto) { ++ if (!first_run) { + window.location.hash = "/resources/" + resource_id; +- update_instance_attributes(resource_id); + } ++ Pcs.resourcesContainer.set('cur_resource', resource_obj); + auto_show_hide_constraints(); + } +- ++ update_instance_attributes(resource_id); + tree_view_select(resource_id); + } + +-- +1.8.3.1 + diff --git a/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch b/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch new file mode 100644 index 0000000..7f7c9a6 --- /dev/null +++ b/SOURCES/bz1231858-02-web-UI-don-t-change-current-resource-in-URL-if-not-i.patch @@ -0,0 +1,76 @@ +From 590157ae3e595560632ddc25c725b67c42a3f2ab Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Wed, 27 Jul 2016 09:56:55 +0200 +Subject: [PATCH] web UI: don't change current resource in URL if not in + resources tab + +--- + pcsd/public/js/nodes-ember.js | 6 ++---- + pcsd/public/js/pcsd.js | 11 +++++------ + 2 files changed, 7 insertions(+), 10 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 2b43559..efc0192 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -210,16 +210,14 @@ Pcs = Ember.Application.createWithMixins({ + Ember.run.scheduleOnce('afterRender', Pcs, function () { + if (self.get('cur_fence')) { + if (fence_change) { +- tree_view_onclick(self.get('cur_fence').get('id'), first_run); ++ tree_view_onclick(self.get('cur_fence').get('id')); + } else { + tree_view_select(self.get('cur_fence').get('id')); + } + } + if (self.get('cur_resource')) { + if (resource_change) { +- tree_view_onclick( +- self.get('cur_resource').get('id'), first_run +- ); ++ tree_view_onclick(self.get('cur_resource').get('id')); + } else { + tree_view_select(self.get('cur_resource').get('id')); + } +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index c8ed340..a646bed 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1134,8 +1134,8 @@ function hover_out(o) { + } + + function reload_current_resource() { +- tree_view_onclick(curResource(), true); +- tree_view_onclick(curStonith(), true); ++ tree_view_onclick(curResource()); ++ tree_view_onclick(curStonith()); + } + + function load_row(node_row, ac, cur_elem, containing_elem, also_set, initial_load){ +@@ -2112,20 +2112,19 @@ function update_instance_attributes(resource_id) { + }, res_obj.get("stonith")); + } + +-function tree_view_onclick(resource_id, first_run) { +- 
first_run = typeof first_run !== 'undefined' ? first_run : false; ++function tree_view_onclick(resource_id) { + var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id); + if (!resource_obj) { + console.log("Resource " + resource_id + "not found."); + return; + } + if (resource_obj.get('stonith')) { +- if (!first_run) { ++ if (window.location.hash.startsWith("#/fencedevices")) { + window.location.hash = "/fencedevices/" + resource_id; + } + Pcs.resourcesContainer.set('cur_fence', resource_obj); + } else { +- if (!first_run) { ++ if (window.location.hash.startsWith("#/resources")) { + window.location.hash = "/resources/" + resource_id; + } + Pcs.resourcesContainer.set('cur_resource', resource_obj); +-- +1.8.3.1 + diff --git a/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch b/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch new file mode 100644 index 0000000..99c01e2 --- /dev/null +++ b/SOURCES/bz1231858-03-resourcefence-agent-options-form-needs-an-overhau.patch @@ -0,0 +1,401 @@ +From 0d440890ade31a2050ac861270a39be5c91d4bbb Mon Sep 17 00:00:00 2001 +From: Ivan Devat +Date: Wed, 14 Sep 2016 15:29:06 +0200 +Subject: [PATCH] squash bz1231858 resource/fence agent options form + +6007fba70212 web UI: treat resource as managed by default + +f1b60c3a2bac WebUI: fix node standby for pcs 0.9.138 and older + +73adbedf268e webUI: allow change groups, clone and unclone of resource on clusters running older pcsd + +1302b4e62e19 webUI: fix group list when managing cluster running older pcsd + +f639c0dded12 webUI: don't show group selector in case cluster doesn't support it + +584092ce7d04 webUI: consolidate backward compatibility code +--- + pcsd/cluster_entity.rb | 2 +- + pcsd/pcs.rb | 20 ++++- + pcsd/pcsd.rb | 169 +++++++++++++++++++++++++++++++++++++----- + pcsd/public/js/nodes-ember.js | 11 ++- + pcsd/remote.rb | 6 +- + pcsd/views/main.erb | 20 ++--- + 6 files changed, 194 insertions(+), 34 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index 4ffcd4b..b8f363a 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -120,7 +120,7 @@ module ClusterEntity + status = ClusterEntity::CRMResourceStatus.new + status.id = primitive.id + status.resource_agent = primitive.agentname +- status.managed = false ++ status.managed = true + status.failed = resource[:failed] + status.role = nil + status.active = resource[:active] +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 137bb3d..e05f3ef 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1864,7 +1864,7 @@ end + def status_v1_to_v2(status) + new_status = status.select { |k,_| + [:cluster_name, :username, :is_cman_with_udpu_transport, +- :need_ring1_address, :cluster_settings, :constraints, :groups, ++ :need_ring1_address, :cluster_settings, :constraints, + :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby, + :pacemaker_offline, :acls, :fence_levels + ].include?(k) +@@ -1885,6 +1885,8 @@ def status_v1_to_v2(status) + ].include?(k) + } + ++ new_status[:groups] = get_group_list_from_tree_of_resources(resources) ++ + new_status[:node].update( + { + :id => status[:node_id], +@@ -1901,6 +1903,22 @@ def status_v1_to_v2(status) + return new_status + end + ++def get_group_list_from_tree_of_resources(tree) ++ group_list = [] ++ tree.each { |resource| ++ if resource.instance_of?(ClusterEntity::Group) ++ group_list << resource.id ++ end ++ if ( ++ resource.kind_of?(ClusterEntity::MultiInstance) and ++ 
resource.member.instance_of?(ClusterEntity::Group) ++ ) ++ group_list << resource.member.id ++ end ++ } ++ return group_list ++end ++ + def allowed_for_local_cluster(auth_user, action) + pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text()) + return pcs_config.permissions_local.allows?( +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index 287cf03..dcfd5a0 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -908,7 +908,7 @@ already been added to pcsd. You may not add two clusters with the same name int + 'type' => 'boolean', + 'shortdesc' => 'Should deleted actions be cancelled', + 'longdesc' => 'Should deleted actions be cancelled', +- 'readable_name' => 'top Orphan Actions', ++ 'readable_name' => 'Stop Orphan Actions', + 'advanced' => false + }, + 'start-failure-is-fatal' => { +@@ -1215,33 +1215,168 @@ already been added to pcsd. You may not add two clusters with the same name int + return [200, "Node added successfully."] + end + ++ def pcs_0_9_142_resource_change_group(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_group => '', ++ :_orig_resource_group => '', ++ } ++ parameters[:resource_group] = params[:group_id] if params[:group_id] ++ if params[:old_group_id] ++ parameters[:_orig_resource_group] = params[:old_group_id] ++ end ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ def pcs_0_9_142_resource_clone(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_clone => true, ++ :_orig_resource_clone => 'false', ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ def pcs_0_9_142_resource_unclone(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_clone => nil, ++ :_orig_resource_clone => 'true', ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ def pcs_0_9_142_resource_master(auth_user, params) ++ parameters = { ++ :resource_id => params[:resource_id], ++ :resource_ms => true, ++ :_orig_resource_ms => 'false', ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'update_resource', true, parameters ++ ) ++ end ++ ++ # There is a bug in pcs-0.9.138 and older in processing the standby and ++ # unstandby request. JS of that pcsd always sent nodename in "node" ++ # parameter, which caused pcsd daemon to run the standby command locally with ++ # param["node"] as node name. This worked fine if the local cluster was ++ # managed from JS, as pacemaker simply put the requested node into standby. ++ # However it didn't work for managing non-local clusters, as the command was ++ # run on the local cluster everytime. Pcsd daemon would send the request to a ++ # remote cluster if the param["name"] variable was set, and that never ++ # happened. That however wouldn't work either, as then the required parameter ++ # "node" wasn't sent in the request causing an exception on the receiving ++ # node. This is fixed in commit 053f63ca109d9ef9e7f0416e90aab8e140480f5b ++ # ++ # In order to be able to put nodes running pcs-0.9.138 into standby, the ++ # nodename must be sent in "node" param, and the "name" must not be sent. 
++ def pcs_0_9_138_node_standby(auth_user, params) ++ translated_params = { ++ 'node' => params[:name], ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'node_standby', true, translated_params ++ ) ++ end ++ ++ def pcs_0_9_138_node_unstandby(auth_user, params) ++ translated_params = { ++ 'node' => params[:name], ++ } ++ return send_cluster_request_with_token( ++ auth_user, params[:cluster], 'node_unstandby', true, translated_params ++ ) ++ end ++ + post '/managec/:cluster/?*' do + auth_user = PCSAuth.sessionToAuthUser(session) + raw_data = request.env["rack.input"].read + if params[:cluster] + request = "/" + params[:splat].join("/") +- code, out = send_cluster_request_with_token( +- auth_user, params[:cluster], request, true, params, true, raw_data +- ) + + # backward compatibility layer BEGIN +- # This code correctly remove constraints on pcs/pcsd version 0.9.137 and older +- redirection = { +- "/remove_constraint_remote" => "/resource_cmd/rm_constraint", +- "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule" ++ translate_for_version = { ++ '/node_standby' => [ ++ [[0, 9, 138], method(:pcs_0_9_138_node_standby)], ++ ], ++ '/node_unstandby' => [ ++ [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)], ++ ], + } +- if code == 404 and redirection.key?(request) ++ if translate_for_version.key?(request) ++ target_pcsd_version = [0, 0, 0] ++ version_code, version_out = send_cluster_request_with_token( ++ auth_user, params[:cluster], 'get_sw_versions' ++ ) ++ if version_code == 200 ++ begin ++ versions = JSON.parse(version_out) ++ target_pcsd_version = versions['pcs'] if versions['pcs'] ++ rescue JSON::ParserError ++ end ++ end ++ translate_function = nil ++ translate_for_version[request].each { |pair| ++ if (target_pcsd_version <=> pair[0]) != 1 # target <= pair ++ translate_function = pair[1] ++ break ++ end ++ } ++ end ++ # backward compatibility layer END ++ ++ if translate_function ++ code, out = translate_function.call(auth_user, params) ++ else + code, out = send_cluster_request_with_token( +- auth_user, +- params[:cluster], +- redirection[request], +- true, +- params, +- false, +- raw_data ++ auth_user, params[:cluster], request, true, params, true, raw_data + ) + end +- # bcl END ++ ++ # backward compatibility layer BEGIN ++ if code == 404 ++ case request ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_change_group' ++ code, out = pcs_0_9_142_resource_change_group(auth_user, params) ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_clone' ++ code, out = pcs_0_9_142_resource_clone(auth_user, params) ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_unclone' ++ code, out = pcs_0_9_142_resource_unclone(auth_user, params) ++ # supported since pcs-0.9.143 (tree view of resources) ++ when '/resource_master' ++ code, out = pcs_0_9_142_resource_master(auth_user, params) ++ else ++ redirection = { ++ # constraints removal for pcs-0.9.137 and older ++ "/remove_constraint_remote" => "/resource_cmd/rm_constraint", ++ # constraints removal for pcs-0.9.137 and older ++ "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule" ++ } ++ if redirection.key?(request) ++ code, out = send_cluster_request_with_token( ++ auth_user, ++ params[:cluster], ++ redirection[request], ++ true, ++ params, ++ false, ++ raw_data ++ ) ++ end ++ end ++ end ++ # backward compatibility layer END ++ + return code, out + end + end +diff --git a/pcsd/public/js/nodes-ember.js 
b/pcsd/public/js/nodes-ember.js +index 19caf14..6ef49e2 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -922,6 +922,15 @@ Pcs.ResourceObj = Ember.Object.extend({ + return ""; + } + }.property("status_val"), ++ show_group_selector: function() { ++ var parent = this.get("parent"); ++ return !( ++ parent && ++ parent.is_group && ++ parent.get("parent") && ++ Pcs.resourcesContainer.get("is_version_1") ++ ); ++ }.property(), + + location_constraints: [], + ordering_constraints: [], +@@ -1012,7 +1021,7 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({ + is_unmanaged: function() { + var instance_status_list = this.get("instance_status"); + if (!instance_status_list) { +- return false; ++ return true; + } + var is_managed = true; + $.each(instance_status_list, function(_, instance_status) { +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 7dc7951..97e63f1 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -334,9 +334,8 @@ end + def node_standby(params, request, auth_user) + if params[:name] + code, response = send_request_with_token( +- auth_user, params[:name], 'node_standby', true, {"node"=>params[:name]} ++ auth_user, params[:name], 'node_standby', true + ) +- # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd + else + if not allowed_for_local_cluster(auth_user, Permissions::WRITE) + return 403, 'Permission denied' +@@ -350,9 +349,8 @@ end + def node_unstandby(params, request, auth_user) + if params[:name] + code, response = send_request_with_token( +- auth_user, params[:name], 'node_unstandby', true, {"node"=>params[:name]} ++ auth_user, params[:name], 'node_unstandby', true + ) +- # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd + else + if not allowed_for_local_cluster(auth_user, Permissions::WRITE) + return 403, 'Permission denied' +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 8de1c60..a138f68 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -246,7 +246,6 @@ + Current Location: + {{resource.nodes_running_on_string}} + +- {{#unless old_pcsd}} + {{#unless resource.parent}} + + Clone: +@@ -268,6 +267,7 @@ + + {{else}} + {{#if resource.parent.is_group}} ++ {{#if resource.show_group_selector}} + + Group: + +@@ -275,11 +275,10 @@ + + + {{/if}} +- {{/unless}} ++ {{/if}} + {{/unless}} + {{/if}} + {{/unless}} +- {{#unless old_pcsd}} + {{#if resource.is_group}} + {{#unless resource.parent}} + +@@ -294,12 +293,14 @@ + + + +- +- Group: +- +- +- +- ++ {{#unless old_pcsd}} ++ ++ Group: ++ ++ ++ ++ ++ {{/unless}} + {{/unless}} + {{/if}} + {{#if resource.is_multi_instance}} +@@ -310,7 +311,6 @@ + + + {{/if}} +- {{/unless}} + + {{#unless resource.stonith}} + {{location_constraints-table constraints=resource.location_constraints}} +-- +1.8.3.1 + diff --git a/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch b/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch deleted file mode 100644 index 1850941..0000000 --- a/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch +++ /dev/null @@ -1,189 +0,0 @@ -From 082be752ee38c8d1314c2130a029e60648f7896b Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 11 Aug 2015 16:34:02 +0200 -Subject: [PATCH] add nagios support to 'pcs resource list' and web UI - ---- - pcs/resource.py | 58 ++++++++++++++++++++++++++++++++++++++++++-------------- - pcsd/remote.rb | 4 ++++ - pcsd/resource.rb | 23 ++++++++++++++++++---- - pcsd/settings.rb | 1 + - 
4 files changed, 68 insertions(+), 18 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index f7d8821..8e05aeb 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -198,13 +198,28 @@ def parse_resource_options(argv, with_clone=False): - # List available resources - # TODO make location more easily configurable - def resource_list_available(argv): -+ def get_name_and_desc(full_res_name, metadata): -+ sd = "" -+ try: -+ dom = parseString(metadata) -+ shortdesc = dom.documentElement.getElementsByTagName("shortdesc") -+ if len(shortdesc) > 0: -+ sd = " - " + format_desc( -+ len(full_res_name + " - "), -+ shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ") -+ ) -+ except xml.parsers.expat.ExpatError: -+ sd = "" -+ finally: -+ return full_res_name + sd + "\n" -+ - ret = "" - if len(argv) != 0: - filter_string = argv[0] - else: - filter_string = "" - --# ocf agents -+ # ocf agents - os.environ['OCF_ROOT'] = "/usr/lib/ocf/" - providers = sorted(os.listdir("/usr/lib/ocf/resource.d")) - for provider in providers: -@@ -223,32 +238,47 @@ def resource_list_available(argv): - metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource) - if metadata == False: - continue -- sd = "" -- try: -- dom = parseString(metadata) -- shortdesc = dom.documentElement.getElementsByTagName("shortdesc") -- if len(shortdesc) > 0: -- sd = " - " + format_desc(full_res_name.__len__() + 3, shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ")) -- except xml.parsers.expat.ExpatError: -- sd = "" -- finally: -- ret += full_res_name + sd + "\n" --# lsb agents -+ ret += get_name_and_desc( -+ "ocf:" + provider + ":" + resource, -+ metadata -+ ) -+ -+ # lsb agents - lsb_dir = "/etc/init.d/" - agents = sorted(os.listdir(lsb_dir)) - for agent in agents: - if os.access(lsb_dir + agent, os.X_OK): - ret += "lsb:" + agent + "\n" --# systemd agents -+ -+ # systemd agents - if utils.is_systemctl(): - agents, retval = utils.run(["systemctl", "list-unit-files", "--full"]) - agents = agents.split("\n") -- - for agent in agents: - match = re.search(r'^([\S]*)\.service',agent) - if match: - ret += "systemd:" + match.group(1) + "\n" - -+ # nagios metadata -+ nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata" -+ for metadata_file in sorted(os.listdir(nagios_metadata_path)): -+ if metadata_file.startswith("."): -+ continue -+ full_res_name = "nagios:" + metadata_file -+ if full_res_name.lower().endswith(".xml"): -+ full_res_name = full_res_name[:-len(".xml")] -+ if "--nodesc" in utils.pcs_options: -+ ret += full_res_name + "\n" -+ continue -+ try: -+ ret += get_name_and_desc( -+ full_res_name, -+ open(os.path.join(nagios_metadata_path, metadata_file), "r").read() -+ ) -+ except EnvironmentError as e: -+ pass -+ -+ # output - if not ret: - utils.err( - "No resource agents available. 
" -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 5b7c753..cb5b176 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -1373,6 +1373,8 @@ def resource_form(params, request, session) - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + @cur_resource.type) - elsif @cur_resource.provider == 'pacemaker' - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + @cur_resource.type) -+ elsif @cur_resource._class == 'nagios' -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') - end - @existing_resource = true - if @resource -@@ -1546,6 +1548,8 @@ def resource_metadata(params, request, session) - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + resource_name) - elsif class_provider == "ocf:pacemaker" - @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + resource_name) -+ elsif class_provider == 'nagios' -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + resource_name + '.xml') - end - @new_resource = params[:new] - @resources, @groups = getResourcesGroups(session) -diff --git a/pcsd/resource.rb b/pcsd/resource.rb -index f375bae..c6b513b 100644 ---- a/pcsd/resource.rb -+++ b/pcsd/resource.rb -@@ -303,13 +303,28 @@ def getColocationConstraints(session, resource_id) - end - - def getResourceMetadata(resourcepath) -- ENV['OCF_ROOT'] = OCF_ROOT -- metadata = `#{resourcepath} meta-data` -- doc = REXML::Document.new(metadata) - options_required = {} - options_optional = {} - long_desc = "" - short_desc = "" -+ -+ if resourcepath.end_with?('.xml') -+ begin -+ metadata = IO.read(resourcepath) -+ rescue -+ metadata = "" -+ end -+ else -+ ENV['OCF_ROOT'] = OCF_ROOT -+ metadata = `#{resourcepath} meta-data` -+ end -+ -+ begin -+ doc = REXML::Document.new(metadata) -+ rescue REXML::ParseException -+ return [options_required, options_optional, [short_desc, long_desc]] -+ end -+ - doc.elements.each('resource-agent/longdesc') {|ld| - long_desc = ld.text ? 
ld.text.strip : ld.text - } -@@ -345,7 +360,7 @@ def getResourceMetadata(resourcepath) - options_optional[param.attributes["name"]] = temp_array - end - } -- [options_required, options_optional, [short_desc,long_desc]] -+ [options_required, options_optional, [short_desc, long_desc]] - end - - def getResourceAgents(session, resource_agent=nil) -diff --git a/pcsd/settings.rb b/pcsd/settings.rb -index 0cd3109..4cea800 100644 ---- a/pcsd/settings.rb -+++ b/pcsd/settings.rb -@@ -8,6 +8,7 @@ COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret' - OCF_ROOT = "/usr/lib/ocf" - HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" - PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" -+NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/' - PENGINE = "/usr/libexec/pacemaker/pengine" - CRM_MON = "/usr/sbin/crm_mon" - CRM_NODE = "/usr/sbin/crm_node" --- -1.9.1 - diff --git a/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch b/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch deleted file mode 100644 index 9fc4091..0000000 --- a/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 2c269bd74344dab5b55f398c90ab0077b3d31e21 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Fri, 4 Sep 2015 12:59:41 +0200 -Subject: [PATCH] fix crash when missing nagios-metadata - ---- - pcs/resource.py | 36 ++++++++++++++++++++---------------- - 1 file changed, 20 insertions(+), 16 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index 8e05aeb..2dcddc3 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -261,22 +261,26 @@ def resource_list_available(argv): - - # nagios metadata - nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata" -- for metadata_file in sorted(os.listdir(nagios_metadata_path)): -- if metadata_file.startswith("."): -- continue -- full_res_name = "nagios:" + metadata_file -- if full_res_name.lower().endswith(".xml"): -- full_res_name = full_res_name[:-len(".xml")] -- if "--nodesc" in utils.pcs_options: -- ret += full_res_name + "\n" -- continue -- try: -- ret += get_name_and_desc( -- full_res_name, -- open(os.path.join(nagios_metadata_path, metadata_file), "r").read() -- ) -- except EnvironmentError as e: -- pass -+ if os.path.isdir(nagios_metadata_path): -+ for metadata_file in sorted(os.listdir(nagios_metadata_path)): -+ if metadata_file.startswith("."): -+ continue -+ full_res_name = "nagios:" + metadata_file -+ if full_res_name.lower().endswith(".xml"): -+ full_res_name = full_res_name[:-len(".xml")] -+ if "--nodesc" in utils.pcs_options: -+ ret += full_res_name + "\n" -+ continue -+ try: -+ ret += get_name_and_desc( -+ full_res_name, -+ open( -+ os.path.join(nagios_metadata_path, metadata_file), -+ "r" -+ ).read() -+ ) -+ except EnvironmentError as e: -+ pass - - # output - if not ret: --- -1.9.1 - diff --git a/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch b/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch deleted file mode 100644 index 509061d..0000000 --- a/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch +++ /dev/null @@ -1,44 +0,0 @@ -From fc89908d91a2438f59dd08cf79aedfb85512091b Mon Sep 17 00:00:00 2001 -From: Chris Feist -Date: Fri, 18 Sep 2015 16:29:58 -0500 -Subject: [PATCH] Added more detailed warnings for 'pcs stonith confirm' - ---- - pcs/pcs.8 | 4 +++- - pcs/usage.py | 5 ++++- - 2 files changed, 7 insertions(+), 2 deletions(-) - -diff --git 
a/pcs/pcs.8 b/pcs/pcs.8 -index 70f0f6c..e89c813 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -333,7 +333,9 @@ fence [\fB\-\-off\fR] - Fence the node specified (if \fB\-\-off\fR is specified, use the 'off' API call to stonith which will turn the node off instead of rebooting it) - .TP - confirm --Confirm that the host specified is currently down. WARNING: if this node is not actually down data corruption/cluster failure can occur. -+Confirm that the host specified is currently down. This command should \fBONLY\fR be used when the node specified has already been confirmed to be down. -+ -+.B WARNING: if this node is not actually down data corruption/cluster failure can occur. - .SS "acl" - .TP - [show] -diff --git a/pcs/usage.py b/pcs/usage.py -index c430965..63baa76 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -833,7 +833,10 @@ Commands: - call to stonith which will turn the node off instead of rebooting it) - - confirm -- Confirm that the host specified is currently down. -+ Confirm that the host specified is currently down. This command -+ should ONLY be used when the node specified has already been -+ confirmed to be down. -+ - WARNING: if this node is not actually down data corruption/cluster - failure can occur. - --- -1.9.1 - diff --git a/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch b/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch new file mode 100644 index 0000000..4eedb64 --- /dev/null +++ b/SOURCES/bz1247088-01-fix-error-message-in-node-maintenanceunmaintenance-commands.patch @@ -0,0 +1,102 @@ +From d1a31c8b887fc668eff8ef582124a84524a5b760 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Mon, 22 Aug 2016 15:52:08 +0200 +Subject: [PATCH] fix error message in node maintenance/unmaintenance commands + +--- + pcs/node.py | 23 ++++++++++++++--------- + pcs/test/test_node.py | 10 ++++++++-- + 2 files changed, 22 insertions(+), 11 deletions(-) + +diff --git a/pcs/node.py b/pcs/node.py +index be2fb13..ed77d5d 100644 +--- a/pcs/node.py ++++ b/pcs/node.py +@@ -77,8 +77,8 @@ def node_maintenance(argv, on=True): + for node in argv: + if node not in cluster_nodes: + utils.err( +- "Node '%s' does not appear to exist in configuration" % +- argv[0], ++ "Node '{0}' does not appear to exist in " ++ "configuration".format(node), + False + ) + failed_count += 1 +@@ -87,25 +87,30 @@ def node_maintenance(argv, on=True): + else: + nodes.append("") + ++ if failed_count > 0: ++ sys.exit(1) ++ + for node in nodes: +- node = ["-N", node] if node else [] ++ node_attr = ["-N", node] if node else [] + output, retval = utils.run( + ["crm_attribute", "-t", "nodes", "-n", "maintenance"] + action + +- node ++ node_attr + ) + if retval != 0: +- node_name = ("node '%s'" % node) if argv else "current node" ++ node_name = ("node '{0}'".format(node)) if argv else "current node" + failed_count += 1 + if on: + utils.err( +- "Unable to put %s to maintenance mode.\n%s" % +- (node_name, output), ++ "Unable to put {0} to maintenance mode: {1}".format( ++ node_name, output ++ ), + False + ) + else: + utils.err( +- "Unable to remove %s from maintenance mode.\n%s" % +- (node_name, output), ++ "Unable to remove {0} from maintenance mode: {1}".format( ++ node_name, output ++ ), + False + ) + if failed_count > 0: +diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py +index 6f03112..785c711 100644 +--- a/pcs/test/test_node.py ++++ b/pcs/test/test_node.py +@@ -88,11 +88,14 @@ Node Attributes: + """ + ac(expected_out, output) + +- output, 
returnVal = pcs(temp_cib, "node maintenance nonexistant-node") ++ output, returnVal = pcs( ++ temp_cib, "node maintenance nonexistant-node and-another" ++ ) + self.assertEqual(returnVal, 1) + self.assertEqual( + output, + "Error: Node 'nonexistant-node' does not appear to exist in configuration\n" ++ "Error: Node 'and-another' does not appear to exist in configuration\n" + ) + output, _ = pcs(temp_cib, "property") + expected_out = """\ +@@ -134,11 +137,14 @@ Cluster Properties: + """ + ac(expected_out, output) + +- output, returnVal = pcs(temp_cib, "node unmaintenance nonexistant-node") ++ output, returnVal = pcs( ++ temp_cib, "node unmaintenance nonexistant-node and-another" ++ ) + self.assertEqual(returnVal, 1) + self.assertEqual( + output, + "Error: Node 'nonexistant-node' does not appear to exist in configuration\n" ++ "Error: Node 'and-another' does not appear to exist in configuration\n" + ) + output, _ = pcs(temp_cib, "property") + expected_out = """\ +-- +1.8.3.1 + diff --git a/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch b/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch deleted file mode 100644 index 1b6aa4f..0000000 --- a/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch +++ /dev/null @@ -1,259 +0,0 @@ -From b47f6196aaf405f17197d4bb312d94ec84042343 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 25 Aug 2015 16:46:46 +0200 -Subject: [PATCH] fixed command injection vulnerability - ---- - pcsd/fenceagent.rb | 53 ++++++++++++++++++++++++++++++++++------------------- - pcsd/pcsd.rb | 6 +++--- - pcsd/remote.rb | 18 +++++++++--------- - pcsd/resource.rb | 27 +++++++++++++++++++++++---- - 4 files changed, 69 insertions(+), 35 deletions(-) - -diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb -index b7674fd..b52ad6f 100644 ---- a/pcsd/fenceagent.rb -+++ b/pcsd/fenceagent.rb -@@ -1,4 +1,4 @@ --def getFenceAgents(fence_agent = nil) -+def getFenceAgents(session, fence_agent = nil) - fence_agent_list = {} - agents = Dir.glob('/usr/sbin/fence_' + '*') - agents.each { |a| -@@ -7,7 +7,7 @@ def getFenceAgents(fence_agent = nil) - next if fa.name == "fence_ack_manual" - - if fence_agent and a.sub(/.*\//,"") == fence_agent.sub(/.*:/,"") -- required_options, optional_options, advanced_options, info = getFenceAgentMetadata(fa.name) -+ required_options, optional_options, advanced_options, info = getFenceAgentMetadata(session, fa.name) - fa.required_options = required_options - fa.optional_options = optional_options - fa.advanced_options = advanced_options -@@ -18,13 +18,42 @@ def getFenceAgents(fence_agent = nil) - fence_agent_list - end - --def getFenceAgentMetadata(fenceagentname) -+def getFenceAgentMetadata(session, fenceagentname) -+ options_required = {} -+ options_optional = {} -+ options_advanced = { -+ "priority" => "", -+ "pcmk_host_argument" => "", -+ "pcmk_host_map" => "", -+ "pcmk_host_list" => "", -+ "pcmk_host_check" => "" -+ } -+ for a in ["reboot", "list", "status", "monitor", "off"] -+ options_advanced["pcmk_" + a + "_action"] = "" -+ options_advanced["pcmk_" + a + "_timeout"] = "" -+ options_advanced["pcmk_" + a + "_retries"] = "" -+ end -+ - # There are bugs in stonith_admin & the new fence_agents interaction - # eventually we'll want to switch back to this, but for now we directly - # call the agent to get metadata - #metadata = `stonith_admin --metadata -a #{fenceagentname}` -- metadata = `/usr/sbin/#{fenceagentname} -o metadata` -- doc = REXML::Document.new(metadata) -+ if not fenceagentname.start_with?('fence_') or 
fenceagentname.include?('/') -+ $logger.error "Invalid fence agent '#{fenceagentname}'" -+ return [options_required, options_optional, options_advanced] -+ end -+ stdout, stderr, retval = run_cmd( -+ session, "/usr/sbin/#{fenceagentname}", '-o', 'metadata' -+ ) -+ metadata = stdout.join -+ begin -+ doc = REXML::Document.new(metadata) -+ rescue REXML::ParseException => e -+ $logger.error( -+ "Unable to parse metadata of fence agent '#{resourcepath}': #{e}" -+ ) -+ return [options_required, options_optional, options_advanced] -+ end - - short_desc = "" - long_desc = "" -@@ -40,20 +69,6 @@ def getFenceAgentMetadata(fenceagentname) - long_desc = ld.text ? ld.text.strip : ld.text - } - -- options_required = {} -- options_optional = {} -- options_advanced = { -- "priority" => "", -- "pcmk_host_argument" => "", -- "pcmk_host_map" => "", -- "pcmk_host_list" => "", -- "pcmk_host_check" => "" -- } -- for a in ["reboot", "list", "status", "monitor", "off"] -- options_advanced["pcmk_" + a + "_action"] = "" -- options_advanced["pcmk_" + a + "_timeout"] = "" -- options_advanced["pcmk_" + a + "_retries"] = "" -- end - doc.elements.each('resource-agent/parameters/parameter') { |param| - temp_array = [] - if param.elements["shortdesc"] -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index e4b4c25..1f26fe5 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -401,7 +401,7 @@ if not DISABLE_GUI - - if @resources.length == 0 - @cur_resource = nil -- @resource_agents = getFenceAgents() -+ @resource_agents = getFenceAgents(session) - else - @cur_resource = @resources[0] - if params[:fencedevice] -@@ -413,7 +413,7 @@ if not DISABLE_GUI - end - end - @cur_resource.options = getResourceOptions(session, @cur_resource.id) -- @resource_agents = getFenceAgents(@cur_resource.agentname) -+ @resource_agents = getFenceAgents(session, @cur_resource.agentname) - end - erb :fencedevices, :layout => :main - end -@@ -477,7 +477,7 @@ if not DISABLE_GUI - # } - # } - @resource_agents = getResourceAgents(session) -- @stonith_agents = getFenceAgents() -+ @stonith_agents = getFenceAgents(session) - # @nodes = @nodes.sort_by{|k,v|k} - erb :nodes, :layout => :main - end -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index cb5b176..4655756 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -1370,11 +1370,11 @@ def resource_form(params, request, session) - @cur_resource_ms = @cur_resource.get_master - @resource = ResourceAgent.new(@cur_resource.agentname) - if @cur_resource.provider == 'heartbeat' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + @cur_resource.type) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + @cur_resource.type) - elsif @cur_resource.provider == 'pacemaker' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + @cur_resource.type) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + @cur_resource.type) - elsif @cur_resource._class == 'nagios' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') - end - @existing_resource = true - if @resource -@@ -1395,7 +1395,7 @@ def 
fence_device_form(params, request, session) - @cur_resource = get_resource_by_id(params[:resource], get_cib_dom(session)) - - if @cur_resource.instance_of?(ClusterEntity::Primitive) and @cur_resource.stonith -- @resource_agents = getFenceAgents(@cur_resource.agentname) -+ @resource_agents = getFenceAgents(session, @cur_resource.agentname) - @existing_resource = true - @fenceagent = @resource_agents[@cur_resource.type] - erb :fenceagentform -@@ -1531,7 +1531,7 @@ def get_avail_fence_agents(params, request, session) - if not allowed_for_local_cluster(session, Permissions::READ) - return 403, 'Permission denied' - end -- agents = getFenceAgents() -+ agents = getFenceAgents(session) - return JSON.generate(agents) - end - -@@ -1545,11 +1545,11 @@ def resource_metadata(params, request, session) - - @resource = ResourceAgent.new(params[:resourcename]) - if class_provider == "ocf:heartbeat" -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + resource_name) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + resource_name) - elsif class_provider == "ocf:pacemaker" -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + resource_name) -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + resource_name) - elsif class_provider == 'nagios' -- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + resource_name + '.xml') -+ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + resource_name + '.xml') - end - @new_resource = params[:new] - @resources, @groups = getResourcesGroups(session) -@@ -1563,7 +1563,7 @@ def fence_device_metadata(params, request, session) - end - return 200 if not params[:resourcename] or params[:resourcename] == "" - @fenceagent = FenceAgent.new(params[:resourcename]) -- @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(params[:resourcename]) -+ @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(session, params[:resourcename]) - @new_fenceagent = params[:new] - - erb :fenceagentform -diff --git a/pcsd/resource.rb b/pcsd/resource.rb -index c6b513b..6f8f7fe 100644 ---- a/pcsd/resource.rb -+++ b/pcsd/resource.rb -@@ -1,4 +1,5 @@ - require 'pp' -+require 'pathname' - - def getResourcesGroups(session, get_fence_devices = false, get_all_options = false, - get_operations=false -@@ -302,12 +303,24 @@ def getColocationConstraints(session, resource_id) - return together,apart - end - --def getResourceMetadata(resourcepath) -+def getResourceMetadata(session, resourcepath) - options_required = {} - options_optional = {} - long_desc = "" - short_desc = "" - -+ resourcepath = Pathname.new(resourcepath).cleanpath.to_s -+ resource_dirs = [ -+ HEARTBEAT_AGENTS_DIR, PACEMAKER_AGENTS_DIR, NAGIOS_METADATA_DIR, -+ ] -+ if not resource_dirs.any? 
{ |allowed| resourcepath.start_with?(allowed) } -+ $logger.error( -+ "Unable to get metadata of resource agent '#{resourcepath}': " + -+ 'path not allowed' -+ ) -+ return [options_required, options_optional, [short_desc, long_desc]] -+ end -+ - if resourcepath.end_with?('.xml') - begin - metadata = IO.read(resourcepath) -@@ -316,12 +329,16 @@ def getResourceMetadata(resourcepath) - end - else - ENV['OCF_ROOT'] = OCF_ROOT -- metadata = `#{resourcepath} meta-data` -+ stdout, stderr, retval = run_cmd(session, resourcepath, 'meta-data') -+ metadata = stdout.join - end - - begin - doc = REXML::Document.new(metadata) -- rescue REXML::ParseException -+ rescue REXML::ParseException => e -+ $logger.error( -+ "Unable to parse metadata of resource agent '#{resourcepath}': #{e}" -+ ) - return [options_required, options_optional, [short_desc, long_desc]] - end - -@@ -381,7 +398,9 @@ def getResourceAgents(session, resource_agent=nil) - if resource_agent and (a.start_with?("ocf:heartbeat:") or a.start_with?("ocf:pacemaker:")) - split_agent = ra.name.split(/:/) - path = OCF_ROOT + '/resource.d/' + split_agent[1] + "/" + split_agent[2] -- required_options, optional_options, resource_info = getResourceMetadata(path) -+ required_options, optional_options, resource_info = getResourceMetadata( -+ session, path -+ ) - ra.required_options = required_options - ra.optional_options = optional_options - ra.info = resource_info --- -1.9.1 - diff --git a/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch b/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch deleted file mode 100644 index 3483ad3..0000000 --- a/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 7323d4fb2454d65bb26839fd6fb4809d19258d34 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 25 Aug 2015 14:51:19 +0200 -Subject: [PATCH] fix pcs/pcsd path detection - ---- - pcs/utils.py | 2 +- - pcsd/bootstrap.rb | 4 +++- - 2 files changed, 4 insertions(+), 2 deletions(-) - -diff --git a/pcs/utils.py b/pcs/utils.py -index cd33a27..761723b 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -736,7 +736,7 @@ def run_pcsdcli(command, data=None): - env_var = dict() - if "--debug" in pcs_options: - env_var["PCSD_DEBUG"] = "true" -- pcs_dir = os.path.dirname(sys.argv[0]) -+ pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0])) - if pcs_dir == "/usr/sbin": - pcsd_dir_path = settings.pcsd_exec_location - else: -diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb -index 07a7d27..64c3b98 100644 ---- a/pcsd/bootstrap.rb -+++ b/pcsd/bootstrap.rb -@@ -1,4 +1,5 @@ - require 'logger' -+require 'pathname' - - require 'settings.rb' - -@@ -32,7 +33,8 @@ def is_systemctl() - end - - def get_pcs_path(pcsd_path) -- if PCSD_EXEC_LOCATION == pcsd_path or PCSD_EXEC_LOCATION == (pcsd_path + '/') -+ real_path = Pathname.new(pcsd_path).realpath.to_s -+ if PCSD_EXEC_LOCATION == real_path or PCSD_EXEC_LOCATION == (real_path + '/') - return '/usr/sbin/pcs' - else - return '../pcs/pcs' --- -1.9.1 - diff --git a/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch b/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch deleted file mode 100644 index a3c5cec..0000000 --- a/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 122c7b6b5d31fdc0cf997aeb01252fb4c8801da5 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Fri, 4 Sep 2015 17:12:27 +0200 -Subject: [PATCH] always print output of crm_resource --cleanup - ---- - pcs/resource.py | 4 ++-- - 1 
file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/pcs/resource.py b/pcs/resource.py -index 2dcddc3..be1f1ba 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -2559,14 +2559,14 @@ def resource_cleanup(res_id): - if retval != 0: - utils.err("Unable to cleanup resource: %s" % res_id + "\n" + output) - else: -- print "Resource: %s successfully cleaned up" % res_id -+ print output - - def resource_cleanup_all(): - (output, retval) = utils.run(["crm_resource", "-C"]) - if retval != 0: - utils.err("Unexpected error occured. 'crm_resource -C' err_code: %s\n%s" % (retval, output)) - else: -- print "All resources/stonith devices successfully cleaned up" -+ print output - - def resource_history(args): - dom = utils.get_cib_dom() --- -1.9.1 - diff --git a/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch b/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch deleted file mode 100644 index ae03878..0000000 --- a/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch +++ /dev/null @@ -1,37 +0,0 @@ -commit 4d4ad9fc870998f4e70256ef62371f38da3a4855 -Author: Chris Feist -AuthorDate: Mon Aug 31 15:13:46 2015 -0500 -Commit: Chris Feist -CommitDate: Mon Aug 31 15:13:46 2015 -0500 - - Fix tracebacks during pcsd shutdowns - -diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb -index e948aef..97d131e 100644 ---- a/pcsd/ssl.rb -+++ b/pcsd/ssl.rb -@@ -67,14 +67,20 @@ end - server = ::Rack::Handler::WEBrick - trap(:INT) do - puts "Shutting down (INT)" -- server.shutdown -- #exit -+ if server.instance_variable_get("@server") -+ server.shutdown -+ else -+ exit -+ end - end - - trap(:TERM) do - puts "Shutting down (TERM)" -- server.shutdown -- #exit -+ if server.instance_variable_get("@server") -+ server.shutdown -+ else -+ exit -+ end - end - - require 'pcsd' diff --git a/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch b/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch new file mode 100644 index 0000000..78764a3 --- /dev/null +++ b/SOURCES/bz1264360-01-web-UI-add-support-for-unmanaged-resources.patch @@ -0,0 +1,320 @@ +From cf1c95354a9db8b81712d7b98d0cc55e777e0516 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Thu, 4 Aug 2016 00:59:11 +0200 +Subject: [PATCH] web UI: add support for unmanaged resources + +--- + pcsd/cluster_entity.rb | 13 ++++++++-- + pcsd/pcs.rb | 1 + + pcsd/public/js/nodes-ember.js | 22 +++++++++++++---- + pcsd/public/js/pcsd.js | 52 ++++++++++++++++++++++++++++++++++++++++ + pcsd/remote.rb | 55 +++++++++++++++++++++++++++++++++++++++---- + pcsd/views/main.erb | 26 ++++++++++++++++++++ + 6 files changed, 158 insertions(+), 11 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index fa56fe2..7216626 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -332,7 +332,11 @@ module ClusterEntity + :unknown => { + :val => 6, + :str => 'unknown' +- } ++ }, ++ :unmanaged => { ++ :val => 7, ++ :str => 'unmanaged' ++ }, + } + + def initialize(status=:unknown) +@@ -532,8 +536,11 @@ module ClusterEntity + def get_status + running = 0 + failed = 0 ++ unmanaged = 0 + @crm_status.each do |s| +- if s.active ++ if !s.managed ++ unmanaged += 1 ++ elsif s.active + running += 1 + elsif s.failed + failed += 1 +@@ -542,6 +549,8 @@ module ClusterEntity + + if disabled? 
+ status = ClusterEntity::ResourceStatus.new(:disabled) ++ elsif unmanaged >0 ++ status = ClusterEntity::ResourceStatus.new(:unmanaged) + elsif running > 0 + status = ClusterEntity::ResourceStatus.new(:running) + elsif failed > 0 or @error_list.length > 0 +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 1eb9e9e..553a20c 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1703,6 +1703,7 @@ def get_node_status(auth_user, cib_dom) + 'sbd', + 'ticket_constraints', + 'moving_resource_in_group', ++ 'unmanaged_resource', + ] + } + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 3d4fe79..c51a341 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -57,6 +57,9 @@ Pcs = Ember.Application.createWithMixins({ + this.get("available_features").indexOf("moving_resource_in_group") != -1 + ); + }.property("available_features"), ++ is_supported_unmanaged_resource: function() { ++ return (this.get("available_features").indexOf("unmanaged_resource") != -1); ++ }.property("available_features"), + is_sbd_running: false, + is_sbd_enabled: false, + is_sbd_enabled_or_running: function() { +@@ -869,9 +872,17 @@ Pcs.ResourceObj = Ember.Object.extend({ + return '' + this.get('status') + ''; + }.property("status_style", "disabled"), + status_class: function() { +- var show = ((Pcs.clusterController.get("show_all_resources"))? "" : "hidden "); +- return ((this.get("status_val") == get_status_value("ok") || this.status == "disabled") ? show + "default-hidden" : ""); +- }.property("status_val"), ++ if ( ++ this.get("status_val") == get_status_value("ok") || ++ ["disabled", "unmanaged"].indexOf(this.get("status")) != -1 ++ ) { ++ return ( ++ Pcs.clusterController.get("show_all_resources") ? "" : "hidden " ++ ) + "default-hidden"; ++ } else { ++ return ""; ++ } ++ }.property("status_val", "status"), + status_class_fence: function() { + var show = ((Pcs.clusterController.get("show_all_fence"))? "" : "hidden "); + return ((this.get("status_val") == get_status_value("ok")) ? 
show + "default-hidden" : ""); +@@ -1681,8 +1692,9 @@ Pcs.Cluster = Ember.Object.extend({ + var num = 0; + $.each(this.get(type), function(key, value) { + if (value.get("status_val") < get_status_value("ok") && +- value.status != "disabled" && value.status != "standby" && +- value.status != "maintenance" ++ [ ++ "unmanaged", "disabled", "standby", "maintenance" ++ ].indexOf(value.status) == -1 + ) { + num++; + } +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 82187ef..56219d4 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1333,6 +1333,9 @@ function remove_resource(ids, force) { + message += "\n\n" + xhr.responseText.replace( + "--force", "'Enforce removal'" + ); ++ alert(message); ++ $("#verify_remove_submit_btn").button("option", "disabled", false); ++ return; + } + } + alert(message); +@@ -1957,6 +1960,7 @@ function get_status_value(status) { + maintenance: 2, + "partially running": 2, + disabled: 3, ++ unmanaged: 3, + unknown: 4, + ok: 5, + running: 5, +@@ -2987,3 +2991,51 @@ function sbd_status_dialog() { + buttons: buttonsOpts + }); + } ++ ++function unmanage_resource(resource_id) { ++ if (!resource_id) { ++ return; ++ } ++ fade_in_out("#resource_unmanage_link"); ++ ajax_wrapper({ ++ type: 'POST', ++ url: get_cluster_remote_url() + "unmanage_resource", ++ data: { ++ resource_list_json: JSON.stringify([resource_id]), ++ }, ++ timeout: pcs_timeout, ++ complete: function() { ++ Pcs.update(); ++ }, ++ error: function (xhr, status, error) { ++ alert( ++ `Unable to unmanage '${resource_id}': ` + ++ ajax_simple_error(xhr, status, error) ++ ); ++ }, ++ }); ++} ++ ++function manage_resource(resource_id) { ++ if (!resource_id) { ++ return; ++ } ++ fade_in_out("#resource_manage_link"); ++ ajax_wrapper({ ++ type: 'POST', ++ url: get_cluster_remote_url() + "manage_resource", ++ data: { ++ resource_list_json: JSON.stringify([resource_id]), ++ }, ++ timeout: pcs_timeout, ++ complete: function() { ++ Pcs.update(); ++ }, ++ error: function (xhr, status, error) { ++ alert( ++ `Unable to manage '${resource_id}': ` + ++ ajax_simple_error(xhr, status, error) ++ ); ++ } ++ }); ++} +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 4844adf..ebf425c 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -116,7 +116,9 @@ def remote(params, request, auth_user) + :set_resource_utilization => method(:set_resource_utilization), + :set_node_utilization => method(:set_node_utilization), + :get_resource_agent_metadata => method(:get_resource_agent_metadata), +- :get_fence_agent_metadata => method(:get_fence_agent_metadata) ++ :get_fence_agent_metadata => method(:get_fence_agent_metadata), ++ :manage_resource => method(:manage_resource), ++ :unmanage_resource => method(:unmanage_resource), + } + + command = params[:command].to_sym +@@ -1575,10 +1577,10 @@ def remove_resource(params, request, auth_user) + end + cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable'] + resource_list.each { |resource| +- _, err, retval = run_cmd(user, *(cmd + [resource])) ++ out, err, retval = run_cmd(user, *(cmd + [resource])) + if retval != 0 + unless ( +- err.join('').index('unable to find a resource') != -1 and ++ (out + err).join('').include?(' does not exist.') and + no_error_if_not_exists + ) + errors += "Unable to stop resource '#{resource}': #{err.join('')}" +@@ -1613,7 +1615,10 @@ def remove_resource(params, request, auth_user) + end + out, err, retval = run_cmd(auth_user, *cmd) + if retval != 0 +- unless out.index(' does not exist.') != -1 and no_error_if_not_exists ++ unless ( 
++ (out + err).join('').include?(' does not exist.') and ++ no_error_if_not_exists ++ ) + errors += err.join(' ').strip + "\n" + end + end +@@ -2630,3 +2635,45 @@ def qdevice_client_start(param, request, auth_user) + return [400, msg] + end + end ++ ++def manage_resource(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ unless param[:resource_list_json] ++ return [400, "Required parameter 'resource_list_json' is missing."] ++ end ++ begin ++ resource_list = JSON.parse(param[:resource_list_json]) ++ _, err, retval = run_cmd( ++ auth_user, PCS, 'resource', 'manage', *resource_list ++ ) ++ if retval != 0 ++ return [400, err.join('')] ++ end ++ return [200, ''] ++ rescue JSON::ParserError ++ return [400, 'Invalid input data format'] ++ end ++end ++ ++def unmanage_resource(param, request, auth_user) ++ unless allowed_for_local_cluster(auth_user, Permissions::WRITE) ++ return 403, 'Permission denied' ++ end ++ unless param[:resource_list_json] ++ return [400, "Required parameter 'resource_list_json' is missing."] ++ end ++ begin ++ resource_list = JSON.parse(param[:resource_list_json]) ++ _, err, retval = run_cmd( ++ auth_user, PCS, 'resource', 'unmanage', *resource_list ++ ) ++ if retval != 0 ++ return [400, err.join('')] ++ end ++ return [200, ''] ++ rescue JSON::ParserError ++ return [400, 'Invalid input data format'] ++ end ++end +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 1b21f92..64fe560 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -160,6 +160,7 @@ + +
+
++
+ {{#if resource.stonith}} +
+ +@@ -174,7 +175,32 @@ + +
+ ++
++
++ {{#if Pcs.is_supported_unmanaged_resource}} ++
++
++ ++
++
++
++ ++
++ {{/if}} + {{/if}} ++
+
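
Note on the pattern used by the pcsd patches above: external tools are invoked with an explicit argument vector (for example run_cmd(session, "/usr/sbin/#{fenceagentname}", '-o', 'metadata')) instead of a backtick-interpolated shell string, and caller-supplied names are checked against a simple allow-list before they ever reach the filesystem or a process. The snippet below is a minimal standalone sketch of that idea using only the Ruby standard library; the helper name and error handling are illustrative and are not pcsd's actual API.

    require 'open3'

    # Sketch only: validate the agent name, then run the binary with an
    # argument vector so no shell is involved and metacharacters in `name`
    # cannot change the command being executed.
    def fence_agent_metadata(name)
      unless name.start_with?('fence_') && !name.include?('/')
        raise ArgumentError, "invalid fence agent name: #{name.inspect}"
      end
      stdout, stderr, status = Open3.capture3("/usr/sbin/#{name}", '-o', 'metadata')
      raise "metadata call failed: #{stderr}" unless status.success?
      stdout
    end

Called as fence_agent_metadata('fence_ipmilan') this behaves like the original backtick version, while an input such as 'fence_x; rm -rf /' is rejected up front and, even without the check, would never be interpreted by a shell because Open3.capture3 receives the arguments as a vector.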