diff --git a/.gitignore b/.gitignore index be8cd56..09fb27e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,2 @@ SOURCES/HAM-logo.png -SOURCES/clufter-0.3.0.tar.gz -SOURCES/pcs-withgems-0.9.137.tar.gz +SOURCES/pcs-withgems-0.9.143.tar.gz diff --git a/.pcs.metadata b/.pcs.metadata index 6e63147..552336a 100644 --- a/.pcs.metadata +++ b/.pcs.metadata @@ -1,3 +1,2 @@ 80dc7788a3468fb7dd362a4b8bedd9efb373de89 SOURCES/HAM-logo.png -5a4d023ca35d952e52c2a8bc11c96c7fefb57b6d SOURCES/clufter-0.3.0.tar.gz -08ab97a1378ba9dcd08c5b5fccce243fffba6bff SOURCES/pcs-withgems-0.9.137.tar.gz +f4cfd8dd9ffdc4ce13a9b6946008ded2e1676709 SOURCES/pcs-withgems-0.9.143.tar.gz diff --git a/SOURCES/bz1054491-Add-acl-enable-and-disable-commands-3.patch b/SOURCES/bz1054491-Add-acl-enable-and-disable-commands-3.patch deleted file mode 100644 index 89741a1..0000000 --- a/SOURCES/bz1054491-Add-acl-enable-and-disable-commands-3.patch +++ /dev/null @@ -1,115 +0,0 @@ -From 3732bb03e2f0b710e85b502c772ad7174d91db80 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 8 Jan 2015 16:00:47 +0100 -Subject: [PATCH] Add acl enable and disable commands - -* add acl enable and disable commands -* display whether acls are enabled in the 'pcs acl' output ---- - pcs/acl.py | 19 +++++++++++++ - pcs/pcs.8 | 6 ++++ - pcs/usage.py | 6 ++++ - pcs/utils.py | 5 ++++ - 5 files changed, 92 insertions(+), 21 deletions(-) - -diff --git a/pcs/acl.py b/pcs/acl.py -index aa07d40..4c2d696 100644 ---- a/pcs/acl.py -+++ b/pcs/acl.py -@@ -1,6 +1,7 @@ - import sys - import usage - import utils -+import prop - - def acl_cmd(argv): - if len(argv) == 0: -@@ -18,6 +19,10 @@ def acl_cmd(argv): - acl_show(argv) - # elif (sub_cmd == "grant"): - # acl_grant(argv) -+ elif (sub_cmd == "enable"): -+ acl_enable(argv) -+ elif (sub_cmd == "disable"): -+ acl_disable(argv) - elif (sub_cmd == "role"): - acl_role(argv) - elif (sub_cmd == "target" or sub_cmd == "user"): -@@ -33,10 +38,24 @@ def acl_cmd(argv): - def acl_show(argv): - dom = utils.get_cib_dom() - -+ properties = prop.get_set_properties(defaults=prop.get_default_properties()) -+ acl_enabled = properties.get("enable-acl", "").lower() -+ if utils.is_cib_true(acl_enabled): -+ print "ACLs are enabled" -+ else: -+ print "ACLs are disabled, run 'pcs acl enable' to enable" -+ print -+ - print_targets(dom) - print_groups(dom) - print_roles(dom) - -+def acl_enable(argv): -+ prop.set_property(["enable-acl=true"]) -+ -+def acl_disable(argv): -+ prop.set_property(["enable-acl=false"]) -+ - def acl_grant(argv): - print "Not yet implemented" - -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index 00ac11b..14917f7 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -316,6 +316,12 @@ Confirm that the host specified is currently down. WARNING: if this node is not - [show] - List all current access control lists - .TP -+enable -+Enable access control lists -+.TP -+disable -+Disable access control lists -+.TP - role create [description=] [((read | write | deny) (xpath | id ))...] - Create a role with the name and (optional) description specified. - Each role can also have an unlimited number of permissions -diff --git a/pcs/usage.py b/pcs/usage.py -index 7bd3368..2c39901 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -969,6 +969,12 @@ Commands: - [show] - List all current access control lists - -+ enable -+ Enable access control lists -+ -+ disable -+ Disable access control lists -+ - role create [description=] [((read | write | deny) - (xpath | id ))...] - Create a role with the name and (optional) description specified. 
-diff --git a/pcs/utils.py b/pcs/utils.py -index 8713c81..de000fa 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -2129,6 +2129,11 @@ def is_iso8601_date(var): - output, retVal = run(["iso8601", "-d", var]) - return retVal == 0 - -+# Does pacemaker consider a variable as true in cib? -+# See crm_is_true in pacemaker/lib/common/utils.c -+def is_cib_true(var): -+ return var.lower() in ("true", "on", "yes", "y", "1") -+ - def is_systemctl(): - if os.path.exists('/usr/bin/systemctl'): - return True --- -1.9.1 - diff --git a/SOURCES/bz1054491-Delete-a-user-group-when-deleting-its-last-role-in-GUI.patch b/SOURCES/bz1054491-Delete-a-user-group-when-deleting-its-last-role-in-GUI.patch deleted file mode 100644 index 0e53c7d..0000000 --- a/SOURCES/bz1054491-Delete-a-user-group-when-deleting-its-last-role-in-GUI.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 50333856badcd0dd6d0f4e4876fd605738317ef9 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Wed, 7 Jan 2015 13:08:26 +0100 -Subject: [PATCH] Delete a user/group when deleting its last ACl role in GUI - ---- - pcs/acl.py | 6 +++++- - pcsd/remote.rb | 4 +++- - 2 files changed, 8 insertions(+), 2 deletions(-) - -diff --git a/pcs/acl.py b/pcs/acl.py -index cbaef37..aa07d40 100644 ---- a/pcs/acl.py -+++ b/pcs/acl.py -@@ -107,7 +107,11 @@ def acl_role(argv): - # Remove any references to this role in acl_target or acl_group - for elem in dom.getElementsByTagName("role"): - if elem.getAttribute("id") == role_id: -- elem.parentNode.removeChild(elem) -+ user_group = elem.parentNode -+ user_group.removeChild(elem) -+ if "--autodelete" in utils.pcs_options: -+ if not user_group.getElementsByTagName("role"): -+ user_group.parentNode.removeChild(user_group) - - utils.replace_cib_configuration(dom) - elif command == "assign": -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index 2e898ab..9709941 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -914,7 +914,9 @@ def remove_acl_roles_remote(params) - errors = "" - params.each { |name, value| - if name.index("role-") == 0 -- out, errout, retval = run_cmd(PCS, "acl", "role", "delete", value.to_s) -+ out, errout, retval = run_cmd( -+ PCS, "acl", "role", "delete", value.to_s, "--autodelete" -+ ) - if retval != 0 - errors += "Unable to remove role #{value}" - unless errout.include?("cib_replace failure") --- -1.9.1 - diff --git a/SOURCES/bz1054491-Fix-acl-add-duplicate-names-and-remove-roles-in-GUI.patch b/SOURCES/bz1054491-Fix-acl-add-duplicate-names-and-remove-roles-in-GUI.patch deleted file mode 100644 index bd1db38..0000000 --- a/SOURCES/bz1054491-Fix-acl-add-duplicate-names-and-remove-roles-in-GUI.patch +++ /dev/null @@ -1,78 +0,0 @@ ---- pcs-0.9.137/pcs/pcs.py.acl-fix 2014-12-19 16:23:13.264292808 -0600 -+++ pcs-0.9.137/pcs/pcs.py 2014-12-19 16:23:57.361816642 -0600 -@@ -54,7 +54,7 @@ def main(argv): - pcs_short_options_with_args.append(prev_char) - prev_char = c - -- pcs_long_options = ["local","start","all","clone","master","force","corosync_conf=", "defaults","debug","version","help","fullhelp","off","from=","to=", "name=", "wait", "group=","groups","full","enable","node=","nodesc","transport=", "addr0=","addr1=","bcast0=","bcast1=","mcast0=","mcast1=","mcastport0=","mcastport1=","ttl0=","ttl1=","rrpmode=", "broadcast0", "broadcast1","wait_for_all=","auto_tie_breaker=","last_man_standing=", "last_man_standing_window=","no-default-ops","ipv6","token=", "token_coefficient=", "consensus=", "miss_count_const=", "fail_recv_const=","join=", "disabled", "after=", "before=", "autocorrect", "interactive"] -+ 
pcs_long_options = ["local","start","all","clone","master","force","corosync_conf=", "defaults","debug","version","help","fullhelp","off","from=","to=", "name=", "wait", "group=","groups","full","enable","node=","nodesc","transport=", "addr0=","addr1=","bcast0=","bcast1=","mcast0=","mcast1=","mcastport0=","mcastport1=","ttl0=","ttl1=","rrpmode=", "broadcast0", "broadcast1","wait_for_all=","auto_tie_breaker=","last_man_standing=", "last_man_standing_window=","no-default-ops","ipv6","token=", "token_coefficient=", "consensus=", "miss_count_const=", "fail_recv_const=","join=", "disabled", "after=", "before=", "autocorrect", "interactive", "autodelete"] - # pull out negative number arguments and add them back after getopt - prev_arg = "" - for arg in argv: ---- pcs-0.9.137/pcs/acl.py.acl-fix 2014-10-21 09:28:55.000000000 -0500 -+++ pcs-0.9.137/pcs/acl.py 2014-12-19 16:23:13.264292808 -0600 -@@ -58,6 +58,8 @@ def acl_role(argv): - id_valid, id_error = utils.validate_xml_id(role_name, 'ACL role') - if not id_valid: - utils.err(id_error) -+ if utils.dom_get_element_with_id(dom, "acl_role", role_name): -+ utils.err("role %s already exists" % role_name) - if utils.does_id_exist(dom,role_name): - utils.err(role_name + " already exists") - -@@ -178,8 +180,13 @@ def acl_role(argv): - - if not found: - utils.err("cannot find role: %s, assigned to user/group: %s" % (role_id, ug_id)) -+ -+ if "--autodelete" in utils.pcs_options: -+ if not ug.getElementsByTagName("role"): -+ ug.parentNode.removeChild(ug) -+ - utils.replace_cib_configuration(dom) -- -+ - else: - utils.err("Unknown pcs acl role command: '" + command + "' (try create or delete)") - -@@ -198,8 +205,14 @@ def acl_target(argv,group=False): - command = argv.pop(0) - tug_id = argv.pop(0) - if command == "create": -+ # pcsd parses the error message in order to determine whether the id is -+ # assigned to user/group or some other cib element -+ if group and utils.dom_get_element_with_id(dom, "acl_group", tug_id): -+ utils.err("group %s already exists" % tug_id) -+ if not group and utils.dom_get_element_with_id(dom, "acl_target", tug_id): -+ utils.err("user %s already exists" % tug_id) - if utils.does_id_exist(dom,tug_id): -- utils.err(tug_id + " already exists in cib") -+ utils.err(tug_id + " already exists") - - if group: - element = dom.createElement("acl_group") ---- pcs-0.9.137/pcsd/pcs.rb.acl-fix 2014-11-20 02:53:57.000000000 -0600 -+++ pcs-0.9.137/pcsd/pcs.rb 2014-12-19 16:23:20.063219392 -0600 -@@ -189,8 +189,8 @@ def add_acl_usergroup(acl_role_id, user_ - if retval == 0 - return "" - end -- if stderr.join("\n").strip.downcase != "error: #{name.to_s.downcase} already exists in cib" -- return stderror.join("\n").strip -+ if not /^error: (user|group) #{name.to_s} already exists$/i.match(stderr.join("\n").strip) -+ return stderr.join("\n").strip - end - end - stdout, stderror, retval = run_cmd( -@@ -211,7 +211,10 @@ def remove_acl_permission(acl_perm_id) - end - - def remove_acl_usergroup(role_id, usergroup_id) -- stdout, stderror, retval = run_cmd(PCS, "acl", "role", "unassign", role_id.to_s, usergroup_id.to_s) -+ stdout, stderror, retval = run_cmd( -+ PCS, "acl", "role", "unassign", role_id.to_s, usergroup_id.to_s, -+ "--autodelete" -+ ) - if retval != 0 - return stderror.join("\n").chomp - end diff --git a/SOURCES/bz1115537-Improve-error-messages-for-scoped-cib-operations.patch b/SOURCES/bz1115537-Improve-error-messages-for-scoped-cib-operations.patch deleted file mode 100644 index cac27b7..0000000 --- 
a/SOURCES/bz1115537-Improve-error-messages-for-scoped-cib-operations.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 03b38736eef95a430e74298642cec3701cabc8d7 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 2 Dec 2014 14:58:44 +0100 -Subject: [PATCH] Improve error messages for scoped cib operations - ---- - pcs/cluster.py | 11 +++++++++++ - pcs/utils.py | 5 ++++- - 2 files changed, 15 insertions(+), 1 deletion(-) - -diff --git a/pcs/cluster.py b/pcs/cluster.py -index 8879316..9730e55 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -17,6 +17,7 @@ import tempfile - import datetime - import commands - import json -+import xml.dom.minidom - - pcs_dir = os.path.dirname(os.path.realpath(__file__)) - COROSYNC_CONFIG_TEMPLATE = pcs_dir + "/corosync.conf.template" -@@ -667,6 +668,16 @@ def cluster_push(argv): - usage.cluster(["cib-push"]) - sys.exit(1) - -+ try: -+ new_cib_dom = xml.dom.minidom.parse(filename) -+ if scope and not new_cib_dom.getElementsByTagName(scope): -+ utils.err( -+ "unable to push cib, scope '%s' not present in new cib" -+ % scope -+ ) -+ except (EnvironmentError, xml.parsers.expat.ExpatError) as e: -+ utils.err("unable to parse new cib: %s" % e) -+ - command = ["cibadmin", "--replace", "--xml-file", filename] - if scope: - command.append("--scope=%s" % scope) -diff --git a/pcs/utils.py b/pcs/utils.py -index 531b837..0e6c70c 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -1520,7 +1520,10 @@ def get_cib(scope=None): - command.append("--scope=%s" % scope) - output, retval = run(command) - if retval != 0: -- err("unable to get cib") -+ if retval == 6 and scope: -+ err("unable to get cib, scope '%s' not present in cib" % scope) -+ else: -+ err("unable to get cib") - return output - - def get_cib_dom(): --- -1.9.1 - diff --git a/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch b/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch new file mode 100644 index 0000000..e76425e --- /dev/null +++ b/SOURCES/bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch @@ -0,0 +1,34 @@ +From a4fa532d6c1091caf94d64c95c5625738aa1ebf3 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Wed, 12 Aug 2015 13:36:27 +0200 +Subject: [PATCH] fix resource relocation of globally-unique clones + +--- + pcs/test/test_utils.py | 57 +++++++++++++++++++++++++++++++++++++++++++++++--- + pcs/utils.py | 9 +++++++- + 2 files changed, 62 insertions(+), 4 deletions(-) + +diff --git a/pcs/utils.py b/pcs/utils.py +index d61ff44..740ff04 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -2044,8 +2044,15 @@ def get_resources_location_from_operations(cib_dom, resources_operations): + continue + long_id = res_op["long_id"] + if long_id not in locations: ++ # Move clone instances as if they were non-cloned resources, it ++ # really works with current pacemaker (1.1.13-6). Otherwise there ++ # is probably no way to move them other then setting their ++ # stickiness to 0. 
++ res_id = res_op["id"] ++ if ":" in res_id: ++ res_id = res_id.split(":")[0] + id_for_constraint = validate_constraint_resource( +- cib_dom, res_op["id"] ++ cib_dom, res_id + )[2] + if not id_for_constraint: + continue +-- +1.9.1 + diff --git a/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch b/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch new file mode 100644 index 0000000..adca66c --- /dev/null +++ b/SOURCES/bz1122818-02-fix-resource-relocate-for-remote-nodes.patch @@ -0,0 +1,48 @@ +From 5f6b6c657f2a88985baf02d24a2de8dafa8ec736 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 25 Aug 2015 13:08:46 +0200 +Subject: [PATCH] fix resource relocate for remote nodes + +--- + pcs/test/test_utils.py | 69 +++++++++++++++++++++++++++ + pcs/test/transitions02.xml | 116 +++++++++++++++++++++++++++++++++++++++++++++ + pcs/utils.py | 8 ++-- + 3 files changed, 190 insertions(+), 3 deletions(-) + create mode 100644 pcs/test/transitions02.xml + +diff --git a/pcs/utils.py b/pcs/utils.py +index 740ff04..cd33a27 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -2014,7 +2014,9 @@ def simulate_cib(cib_dom): + + def get_operations_from_transitions(transitions_dom): + operation_list = [] +- watched_operations = ("start", "stop", "promote", "demote") ++ watched_operations = ( ++ "start", "stop", "promote", "demote", "migrate_from", "migrate_to" ++ ) + for rsc_op in transitions_dom.getElementsByTagName("rsc_op"): + primitives = rsc_op.getElementsByTagName("primitive") + if not primitives: +@@ -2040,7 +2042,7 @@ def get_resources_location_from_operations(cib_dom, resources_operations): + locations = {} + for res_op in resources_operations: + operation = res_op["operation"] +- if operation not in ("start", "promote"): ++ if operation not in ("start", "promote", "migrate_from"): + continue + long_id = res_op["long_id"] + if long_id not in locations: +@@ -2061,7 +2063,7 @@ def get_resources_location_from_operations(cib_dom, resources_operations): + "long_id": long_id, + "id_for_constraint": id_for_constraint, + } +- if operation == "start": ++ if operation in ("start", "migrate_from"): + locations[long_id]["start_on_node"] = res_op["on_node"] + if operation == "promote": + locations[long_id]["promote_on_node"] = res_op["on_node"] +-- +1.9.1 + diff --git a/SOURCES/bz1156311-Fix-waiting-for-resource-operations.patch b/SOURCES/bz1156311-Fix-waiting-for-resource-operations.patch deleted file mode 100644 index ca5e725..0000000 --- a/SOURCES/bz1156311-Fix-waiting-for-resource-operations.patch +++ /dev/null @@ -1,397 +0,0 @@ -From 9ed24231c194985f16ba14633d4f215f48608ee2 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 4 Dec 2014 17:10:11 +0100 -Subject: [PATCH] Fix waiting for resource operations - -* fixed waiting for globally-unique clone resources -* added --wait support to 'pcs resource update' command -* do not exit with an error when a resource is not running, print a warning - instead ---- - pcs/pcs.8 | 4 +- - pcs/resource.py | 175 +++++++++++++++++++++++++++++++++++++++----------------- - pcs/usage.py | 8 ++- - pcs/utils.py | 46 ++++++++++++++- - 4 files changed, 174 insertions(+), 59 deletions(-) - -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index 2020f99..67f85d5 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -92,8 +92,8 @@ List available OCF resource agent providers - agents [standard[:provider]] - List available agents optionally filtered by standard and provider - .TP --update [resource options] [op [ ]...] [meta ...] 
--Add/Change options to specified resource, clone or multi\-state resource. If an operation (op) is specified it will update the first found operation with the same action on the specified resource, if no operation with that action exists then a new operation will be created (WARNING: all current options on the update op will be reset if not specified). If you want to create multiple monitor operations you should use the add_operation & remove_operation commands. -+update [resource options] [op [ ]...] [meta ...] [\fB\-\-wait\fR[=n]] -+Add/Change options to specified resource, clone or multi\-state resource. If an operation (op) is specified it will update the first found operation with the same action on the specified resource, if no operation with that action exists then a new operation will be created (WARNING: all current options on the update op will be reset if not specified). If you want to create multiple monitor operations you should use the add_operation & remove_operation commands. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise. If 'n' is not specified, default resource timeout will be used. - .TP - op add [operation properties] - Add operation for specified resource -diff --git a/pcs/resource.py b/pcs/resource.py -index 48d894d..75cd8bb 100644 ---- a/pcs/resource.py -+++ b/pcs/resource.py -@@ -575,8 +575,10 @@ def resource_move(argv,clear=False,ban=False): - if utils.usefile: - utils.err("Cannot use '-f' together with '--wait'") - if not utils.is_resource_started(resource_id, 0)[0]: -- utils.err("Cannot use '--wait' on non-running resources") -- wait = True -+ print "Warning: Cannot use '--wait' on non-running resources" -+ else: -+ wait = True -+ if wait: - timeout = utils.pcs_options["--wait"] - if timeout is None: - timeout = ( -@@ -737,6 +739,12 @@ def resource_update(res_id,args): - else: - ra_values.append(arg) - -+ wait = False -+ if "--wait" in utils.pcs_options: -+ if utils.usefile: -+ utils.err("Cannot use '-f' together with '--wait'") -+ wait = True -+ - resource = None - for r in dom.getElementsByTagName("primitive"): - if r.getAttribute("id") == res_id: -@@ -753,10 +761,8 @@ def resource_update(res_id,args): - if clone: - for a in c.childNodes: - if a.localName == "primitive" or a.localName == "group": -- return utils.replace_cib_configuration( -- resource_clone_create( -- dom, [a.getAttribute("id")] + args, True -- ) -+ return resource_update_clone_master( -+ dom, clone, "clone", a.getAttribute("id"), args, wait - ) - - master = None -@@ -766,12 +772,18 @@ def resource_update(res_id,args): - break - - if master: -- return utils.replace_cib_configuration( -- resource_master_create(dom, [res_id] + args, True) -+ return resource_update_clone_master( -+ dom, master, "master", res_id, args, wait - ) - - utils.err ("Unable to find resource: %s" % res_id) - -+ if wait: -+ node_count = len(utils.getNodesFromPacemaker()) -+ status_old = utils.get_resource_status_for_wait( -+ dom, resource, node_count -+ ) -+ - instance_attributes = resource.getElementsByTagName("instance_attributes") - if len(instance_attributes) == 0: - instance_attributes = dom.createElement("instance_attributes") -@@ -919,8 +931,85 @@ def resource_update(res_id,args): - if len(instance_attributes.getElementsByTagName("nvpair")) == 0: - instance_attributes.parentNode.removeChild(instance_attributes) - -+ if wait: -+ status_new = utils.get_resource_status_for_wait( -+ dom, resource, 
node_count -+ ) -+ wait_for_start, wait_for_stop = utils.get_resource_wait_decision( -+ status_old, status_new -+ ) -+ if wait_for_start or wait_for_stop: -+ timeout = utils.pcs_options["--wait"] -+ if timeout is None: -+ timeout = utils.get_resource_op_timeout( -+ dom, res_id, "start" if wait_for_start else "stop" -+ ) -+ elif not timeout.isdigit(): -+ utils.err("You must specify the number of seconds to wait") -+ else: -+ timeout = 0 -+ - utils.replace_cib_configuration(dom) - -+ if wait: -+ if wait_for_start or wait_for_stop: -+ success, message = utils.is_resource_started( -+ res_id, int(timeout), wait_for_stop, -+ count=status_new["instances"] -+ ) -+ if success: -+ print message -+ else: -+ utils.err("Unable to start '%s'\n%s" % (res_id, message)) -+ else: -+ print utils.resource_running_on(res_id)["message"] -+ -+def resource_update_clone_master(dom, clone, clone_type, res_id, args, wait): -+ if wait: -+ node_count = len(utils.getNodesFromPacemaker()) -+ status_old = utils.get_resource_status_for_wait(dom, clone, node_count) -+ -+ if clone_type == "clone": -+ dom = resource_clone_create(dom, [res_id] + args, True) -+ elif clone_type == "master": -+ dom = resource_master_create(dom, [res_id] + args, True) -+ -+ if wait: -+ status_new = utils.get_resource_status_for_wait(dom, clone, node_count) -+ wait_for_start, wait_for_stop = utils.get_resource_wait_decision( -+ status_old, status_new -+ ) -+ if wait_for_start or wait_for_stop: -+ timeout = utils.pcs_options["--wait"] -+ if timeout is None: -+ timeout = utils.get_resource_op_timeout( -+ dom, res_id, "start" if wait_for_start else "stop" -+ ) -+ elif not timeout.isdigit(): -+ utils.err("You must specify the number of seconds to wait") -+ else: -+ timeout = 0 -+ -+ dom = utils.replace_cib_configuration(dom) -+ -+ if wait: -+ if wait_for_start or wait_for_stop: -+ success, message = utils.is_resource_started( -+ clone.getAttribute("id"), int(timeout), wait_for_stop, -+ count=status_new["instances"] -+ ) -+ if success: -+ print message -+ else: -+ utils.err( -+ "Unable to start '%s'\n%s" -+ % (clone.getAttribute("id"), message) -+ ) -+ else: -+ print utils.resource_running_on(clone.getAttribute("id"))["message"] -+ -+ return dom -+ - # Removes all OCF_CHECK_LEVEL nvpairs - def remove_ocf_check_levels(dom): - for np in dom.getElementsByTagName("nvpair")[:]: -@@ -1092,15 +1181,7 @@ def resource_meta(res_id, argv): - utils.err("Cannot use '-f' together with '--wait'") - wait = True - node_count = len(utils.getNodesFromPacemaker()) -- clone_ms_parent = utils.dom_get_resource_clone_ms_parent(dom, res_id) -- old_status_running = utils.is_resource_started(res_id, 0)[0] -- old_role = utils.dom_get_meta_attr_value( -- meta_attributes.parentNode, "target-role" -- ) -- old_status_enabled = not old_role or old_role.lower() != "stopped" -- old_status_instances = utils.count_expected_resource_instances( -- clone_ms_parent if clone_ms_parent else elem, node_count -- ) -+ status_old = utils.get_resource_status_for_wait(dom, elem, node_count) - - update_meta_attributes( - meta_attributes, -@@ -1109,29 +1190,10 @@ def resource_meta(res_id, argv): - ) - - if wait: -- new_role = utils.dom_get_meta_attr_value( -- meta_attributes.parentNode, "target-role" -+ status_new = utils.get_resource_status_for_wait(dom, elem, node_count) -+ wait_for_start, wait_for_stop = utils.get_resource_wait_decision( -+ status_old, status_new - ) -- new_status_enabled = not new_role or new_role.lower() != "stopped" -- new_status_instances = 
utils.count_expected_resource_instances( -- clone_ms_parent if clone_ms_parent else elem, node_count -- ) -- wait_for_start = False -- wait_for_stop = False -- if old_status_running and not new_status_enabled: -- wait_for_stop = True -- elif ( -- not old_status_running -- and -- (not old_status_enabled and new_status_enabled) -- ): -- wait_for_start = True -- elif ( -- old_status_running -- and -- old_status_instances != new_status_instances -- ): -- wait_for_start = True - if wait_for_start or wait_for_stop: - timeout = utils.pcs_options["--wait"] - if timeout is None: -@@ -1145,14 +1207,17 @@ def resource_meta(res_id, argv): - - utils.replace_cib_configuration(dom) - -- if wait and (wait_for_start or wait_for_stop): -- success, message = utils.is_resource_started( -- res_id, int(timeout), wait_for_stop, count=new_status_instances -- ) -- if success: -- print message -+ if wait: -+ if wait_for_start or wait_for_stop: -+ success, message = utils.is_resource_started( -+ res_id, int(timeout), wait_for_stop, count=status_new["instances"] -+ ) -+ if success: -+ print message -+ else: -+ utils.err("Unable to start '%s'\n%s" % (res_id, message)) - else: -- utils.err("Unable to start '%s'\n%s" % (res_id, message)) -+ print utils.resource_running_on(res_id)["message"] - - def update_meta_attributes(meta_attributes, meta_attrs, id_prefix): - dom = meta_attributes.ownerDocument -@@ -1377,8 +1442,10 @@ def resource_clone(argv): - if utils.usefile: - utils.err("Cannot use '-f' together with '--wait'") - if not utils.is_resource_started(res, 0)[0]: -- utils.err("Cannot use '--wait' on non-running resources") -- wait = True -+ print "Warning: Cannot use '--wait' on non-running resources" -+ else: -+ wait = True -+ if wait: - wait_op = "start" - for arg in argv: - if arg.lower() == "target-role=stopped": -@@ -1486,8 +1553,10 @@ def resource_clone_master_remove(argv): - if utils.usefile: - utils.err("Cannot use '-f' together with '--wait'") - if not utils.is_resource_started(resource_id, 0)[0]: -- utils.err("Cannot use '--wait' on non-running resources") -- wait = True -+ print "Warning: Cannot use '--wait' on non-running resources" -+ else: -+ wait = True -+ if wait: - timeout = utils.pcs_options["--wait"] - if timeout is None: - timeout = utils.get_resource_op_timeout(dom, resource_id, "stop") -@@ -1534,8 +1603,10 @@ def resource_master(argv): - if utils.usefile: - utils.err("Cannot use '-f' together with '--wait'") - if not utils.is_resource_started(res_id, 0)[0]: -- utils.err("Cannot use '--wait' on non-running resources") -- wait = True -+ print "Warning: Cannot use '--wait' on non-running resources" -+ else: -+ wait = True -+ if wait: - wait_op = "promote" - for arg in argv: - if arg.lower() == "target-role=stopped": -diff --git a/pcs/usage.py b/pcs/usage.py -index ed99148..a66b90e 100644 ---- a/pcs/usage.py -+++ b/pcs/usage.py -@@ -333,14 +333,18 @@ Commands: - List available agents optionally filtered by standard and provider - - update [resource options] [op [ -- ]...] [meta ...] -+ ]...] [meta ...] [--wait[=n]] - Add/Change options to specified resource, clone or multi-state - resource. If an operation (op) is specified it will update the first - found operation with the same action on the specified resource, if no - operation with that action exists then a new operation will be created. - (WARNING: all current options on the update op will be reset if not - specified) If you want to create multiple monitor operations you should -- use the add_operation & remove_operation commands. 
-+ use the add_operation & remove_operation commands. If --wait is -+ specified, pcs will wait up to 'n' seconds for the changes to take -+ effect and then return 0 if the changes have been processed or 1 -+ otherwise. If 'n' is not specified, default resource timeout will -+ be used. - - op add [operation properties] - Add operation for specified resource -diff --git a/pcs/utils.py b/pcs/utils.py -index 0e6c70c..76fe57f 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -1038,11 +1038,12 @@ def is_resource_started( - for res in resources: - # If resource is a clone it can have an id of ':N' - if res.getAttribute("id") == resource or res.getAttribute("id").startswith(resource+":"): -- set_running_on = set( -+ list_running_on = ( - running_on["nodes_started"] + running_on["nodes_master"] - ) - if slave_as_started: -- set_running_on.update(running_on["nodes_slave"]) -+ list_running_on.extend(running_on["nodes_slave"]) -+ set_running_on = set(list_running_on) - if stopped: - if ( - res.getAttribute("role") != "Stopped" -@@ -1071,7 +1072,7 @@ def is_resource_started( - and - res.getAttribute("failed") != "true" - and -- (count is None or len(set_running_on) == count) -+ (count is None or len(list_running_on) == count) - and - ( - not banned_nodes -@@ -1180,6 +1181,45 @@ def wait_for_primitive_ops_to_process(op_list, timeout=None): - % (op[1], op[0], op[2], message) - ) - -+def get_resource_status_for_wait(dom, resource_el, node_count): -+ res_id = resource_el.getAttribute("id") -+ clone_ms_parent = dom_get_resource_clone_ms_parent(dom, res_id) -+ meta_resource_el = clone_ms_parent if clone_ms_parent else resource_el -+ status_running = is_resource_started(res_id, 0)[0] -+ status_enabled = True -+ for meta in meta_resource_el.getElementsByTagName("meta_attributes"): -+ for nvpair in meta.getElementsByTagName("nvpair"): -+ if nvpair.getAttribute("name") == "target-role": -+ if nvpair.getAttribute("value").lower() == "stopped": -+ status_enabled = False -+ status_instances = count_expected_resource_instances( -+ meta_resource_el, node_count -+ ) -+ return { -+ "running": status_running, -+ "enabled": status_enabled, -+ "instances": status_instances, -+ } -+ -+def get_resource_wait_decision(old_status, new_status): -+ wait_for_start = False -+ wait_for_stop = False -+ if old_status["running"] and not new_status["enabled"]: -+ wait_for_stop = True -+ elif ( -+ not old_status["running"] -+ and -+ (not old_status["enabled"] and new_status["enabled"]) -+ ): -+ wait_for_start = True -+ elif ( -+ old_status["running"] -+ and -+ old_status["instances"] != new_status["instances"] -+ ): -+ wait_for_start = True -+ return wait_for_start, wait_for_stop -+ - def get_lrm_rsc_op(cib, resource, op_list=None, last_call_id=None): - lrm_rsc_op_list = [] - for lrm_resource in cib.getElementsByTagName("lrm_resource"): --- -1.9.1 - diff --git a/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch b/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch new file mode 100644 index 0000000..baded2f --- /dev/null +++ b/SOURCES/bz1158566-01-fix-dashboard-in-web-UI.patch @@ -0,0 +1,396 @@ +From ef01aa872871b8e1ea79058cbe3301ce878dde9a Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 25 Aug 2015 11:44:00 +0200 +Subject: [PATCH] fix dashboard in web UI + +--- + pcsd/cluster_entity.rb | 53 +++++++++++++++++++++++++++++----------- + pcsd/pcs.rb | 14 ++++++++--- + pcsd/public/js/nodes-ember.js | 17 ++++++++++--- + pcsd/public/js/pcsd.js | 38 ++++++++++++++-------------- + pcsd/remote.rb | 22 +++++++++++++++-- + 
pcsd/test/test_cluster_entity.rb | 4 +-- + pcsd/views/_resource.erb | 20 +++++++-------- + pcsd/views/main.erb | 4 +++ + 8 files changed, 117 insertions(+), 55 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index b291937..78bc5ab 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -112,6 +112,9 @@ module ClusterEntity + status.node = node + primitive.crm_status << status + } ++ primitives.each {|_, resource| ++ resource[0].update_status ++ } + return primitives + end + +@@ -178,6 +181,9 @@ module ClusterEntity + end + end + } ++ tree.each {|resource| ++ resource.update_status ++ } + return tree + end + +@@ -491,23 +497,27 @@ module ClusterEntity + end + end + ++ def update_status ++ @status = get_status ++ end ++ + def get_status +- count = @crm_status.length + running = 0 ++ failed = 0 + @crm_status.each do |s| +- if ['Started', 'Master', 'Slave'].include?(s.role) ++ if s.active + running += 1 ++ elsif s.failed ++ failed += 1 + end + end + + if disabled? + status = ClusterEntity::ResourceStatus.new(:disabled) +- elsif running != 0 +- if running == count +- status = ClusterEntity::ResourceStatus.new(:running) +- else +- status = ClusterEntity::ResourceStatus.new(:partially_running) +- end ++ elsif running > 0 ++ status = ClusterEntity::ResourceStatus.new(:running) ++ elsif failed > 0 ++ status = ClusterEntity::ResourceStatus.new(:failed) + else + status = ClusterEntity::ResourceStatus.new(:blocked) + end +@@ -655,6 +665,14 @@ module ClusterEntity + end + end + ++ def update_status ++ @status = ClusterEntity::ResourceStatus.new(:running) ++ @members.each { |p| ++ p.update_status ++ @status = p.status if @status < p.status ++ } ++ end ++ + def to_status(version='1') + if version == '2' + hash = super(version) +@@ -730,6 +748,13 @@ module ClusterEntity + end + end + ++ def update_status ++ if @member ++ @member.update_status ++ @status = @member.status ++ end ++ end ++ + def to_status(version='1') + if version == '2' + hash = super(version) +@@ -794,13 +819,13 @@ module ClusterEntity + primitive_list = @member.members + end + @masters, @slaves = get_masters_slaves(primitive_list) +- end +- if @masters.empty? +- @error_list << { +- :message => 'Resource is master/slave but has not been promoted '\ ++ if @masters.empty? 
++ @error_list << { ++ :message => 'Resource is master/slave but has not been promoted '\ + + 'to master on any node.', +- :type => 'no_master' +- } ++ :type => 'no_master' ++ } ++ end + end + end + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 1fe9b99..cc5b038 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1506,10 +1506,18 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) + status = overview.update(cluster_nodes_map[quorate_nodes[0]]) + status[:quorate] = true + status[:node_list] = node_status_list +- # if we don't have quorum, use data from any node +- # no node has quorum, so no node has any info about the cluster ++ # if we don't have quorum, use data from any online node, ++ # otherwise use data from any node no node has quorum, so no node has any ++ # info about the cluster + elsif not old_status +- status = overview.update(cluster_nodes_map.values[0]) ++ node_to_use = cluster_nodes_map.values[0] ++ cluster_nodes_map.each { |_, node_data| ++ if node_data[:node] and node_data[:node][:status] == 'online' ++ node_to_use = node_data ++ break ++ end ++ } ++ status = overview.update(node_to_use) + status[:quorate] = false + status[:node_list] = node_status_list + # old pcsd doesn't provide info about quorum, use data from any node +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 1f60adc..172c00a 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -54,7 +54,8 @@ Pcs = Ember.Application.createWithMixins({ + if (window.location.pathname.lastIndexOf('/manage', 0) !== 0) { + return; + } +- clearTimeout(Pcs.update_timeout); ++ clearTimeout(Pcs.get('update_timeout')); ++ Pcs.set('update_timeout', null); + var self = Pcs; + var cluster_name = self.cluster_name; + if (cluster_name == null) { +@@ -77,7 +78,7 @@ Pcs = Ember.Application.createWithMixins({ + if (data["not_current_data"]) { + self.update(); + } else { +- Pcs.update_timeout = window.setTimeout(self.update, 20000); ++ Pcs.set('update_timeout', window.setTimeout(self.update,20000)); + } + hide_loading_screen(); + }, +@@ -92,7 +93,7 @@ Pcs = Ember.Application.createWithMixins({ + console.log("Error: Unable to parse json for clusters_overview"); + } + } +- Pcs.update_timeout = window.setTimeout(self.update,20000); ++ Pcs.set('update_timeout', window.setTimeout(self.update,20000)); + hide_loading_screen(); + } + }); +@@ -126,6 +127,7 @@ Pcs = Ember.Application.createWithMixins({ + var cur_resource = self.get('cur_resource'); + var resource_map = self.get('resource_map'); + if (first_run) { ++ setup_node_links(); + Pcs.nodesController.load_node($('#node_list_row').find('.node_selected').first(),true); + Pcs.aclsController.load_role($('#acls_list_row').find('.node_selected').first(), true); + if (self.get("fence_id_to_load")) { +@@ -173,7 +175,6 @@ Pcs = Ember.Application.createWithMixins({ + if (!resource_change && self.get('cur_resource')) + tree_view_select(self.get('cur_resource').get('id')); + Pcs.selectedNodeController.reset(); +- setup_node_links(); + disable_checkbox_clicks(); + }); + }); +@@ -207,6 +208,7 @@ Pcs.resourcesContainer = Ember.Object.create({ + cur_fence: null, + constraints: {}, + group_list: [], ++ data_version: null, + + get_resource_by_id: function(resource_id) { + var resource_map = this.get('resource_map'); +@@ -434,6 +436,7 @@ Pcs.resourcesContainer = Ember.Object.create({ + update: function(data) { + var self = this; + self.set('group_list', data['groups']); ++ self.set("data_version", data['status_version']); + var 
resources = data["resource_list"]; + var resource_obj = null; + var resource_id; +@@ -495,6 +498,12 @@ Pcs.resourcesContainer = Ember.Object.create({ + } + }); + ++Pcs.resourcesContainer.reopen({ ++ is_version_1: function() { ++ return (this.get("data_version") == '1'); ++ }.property('data_version') ++}); ++ + Pcs.ResourceObj = Ember.Object.extend({ + id: null, + _id: Ember.computed.alias('id'), +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 9891aa8..2c71e6b 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1242,26 +1242,24 @@ function destroy_tooltips() { + } + + function remove_cluster(ids) { +- for (var i=0; i cluster.name, ++ 'error_list' => [ ++ {'message' => 'Unable to connect to the cluster. Request timeout.'} ++ ], ++ 'warning_list' => [], ++ 'status' => 'unknown', ++ 'node_list' => get_default_overview_node_list(cluster.name), ++ 'resource_list' => [] ++ } + overview_cluster = nil + online, offline, not_authorized_nodes = check_gui_status_of_nodes( + session, +@@ -1134,7 +1145,7 @@ def clusters_overview(params, request, session) + nodes_not_in_cluster = [] + for node in cluster_nodes_auth + code, response = send_request_with_token( +- session, node, 'cluster_status', true, {}, true, nil, 15 ++ session, node, 'cluster_status', true, {}, true, nil, 8 + ) + if code == 404 + not_supported = true +@@ -1228,7 +1239,14 @@ def clusters_overview(params, request, session) + cluster_map[cluster.name] = overview_cluster + } + } +- threads.each { |t| t.join } ++ ++ begin ++ Timeout::timeout(18) { ++ threads.each { |t| t.join } ++ } ++ rescue Timeout::Error ++ threads.each { |t| t.exit } ++ end + + # update clusters in PCSConfig + not_current_data = false +diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb +index 862b648..cc4c06e 100644 +--- a/pcsd/views/_resource.erb ++++ b/pcsd/views/_resource.erb +@@ -32,16 +32,16 @@ + + <%= erb :_resource_list %> + +- +-
+- <% if @myView == "resource" %> +- {{resource-edit resource=Pcs.resourcesContainer.cur_resource page_name="Resource"}} +- <% else %> +- {{resource-edit resource=Pcs.resourcesContainer.cur_fence page_name="Fence device" stonith=1}} +- <% end %> +-
+- +- ++ ++
++ <% if @myView == "resource" %> ++ {{resource-edit resource=Pcs.resourcesContainer.cur_resource page_name="Resource" old_pcsd=Pcs.resourcesContainer.is_version_1}} ++ <% else %> ++ {{resource-edit resource=Pcs.resourcesContainer.cur_fence page_name="Fence device" stonith=1 old_pcsd=Pcs.resourcesContainer.is_version_1}} ++ <% end %> ++
++ ++ + <% if @myView == "resource" %> + + +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index 3c1e0cd..bb4e989 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -197,6 +197,7 @@ + Current Location: + {{resource.nodes_running_on_string}} + ++ {{#unless old_pcsd}} + {{#unless resource.parent}} + + Clone: +@@ -226,8 +227,10 @@ + + {{/if}} + {{/unless}} ++ {{/unless}} + {{/if}} + {{/unless}} ++ {{#unless old_pcsd}} + {{#if resource.is_group}} + {{#unless resource.parent}} + +@@ -258,6 +261,7 @@ + + + {{/if}} ++ {{/unless}} + + {{#unless resource.stonith}} + {{location_constraints-table constraints=resource.location_constraints}} +-- +1.9.1 + diff --git a/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch b/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch new file mode 100644 index 0000000..f901308 --- /dev/null +++ b/SOURCES/bz1158566-02-fix-loading-cluster-status-for-web-UI.patch @@ -0,0 +1,143 @@ +From f55ca2f12c4552fcd516737fa797cf806aa70705 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Thu, 3 Sep 2015 12:29:37 +0200 +Subject: [PATCH] fix loading cluster status for web UI + +--- + pcs/status.py | 37 ++++++++++++++++++++++++++++++++++--- + pcsd/cluster_entity.rb | 25 ++++++++++++++++++++++--- + pcsd/pcs.rb | 3 +++ + 3 files changed, 59 insertions(+), 6 deletions(-) + +diff --git a/pcs/status.py b/pcs/status.py +index eb2a5eb..34354ef 100644 +--- a/pcs/status.py ++++ b/pcs/status.py +@@ -123,14 +123,28 @@ def nodes_status(argv): + onlinenodes = [] + offlinenodes = [] + standbynodes = [] ++ remote_onlinenodes = [] ++ remote_offlinenodes = [] ++ remote_standbynodes = [] + for node in nodes[0].getElementsByTagName("node"): ++ node_name = node.getAttribute("name") ++ node_remote = node.getAttribute("type") == "remote" + if node.getAttribute("online") == "true": + if node.getAttribute("standby") == "true": +- standbynodes.append(node.getAttribute("name")) ++ if node_remote: ++ remote_standbynodes.append(node_name) ++ else: ++ standbynodes.append(node_name) + else: +- onlinenodes.append(node.getAttribute("name")) ++ if node_remote: ++ remote_onlinenodes.append(node_name) ++ else: ++ onlinenodes.append(node_name) + else: +- offlinenodes.append(node.getAttribute("name")) ++ if node_remote: ++ remote_offlinenodes.append(node_name) ++ else: ++ offlinenodes.append(node_name) + + print "Pacemaker Nodes:" + +@@ -149,6 +163,23 @@ def nodes_status(argv): + print node, + print "" + ++ print "Pacemaker Remote Nodes:" ++ ++ print " Online:", ++ for node in remote_onlinenodes: ++ print node, ++ print "" ++ ++ print " Standby:", ++ for node in remote_standbynodes: ++ print node, ++ print "" ++ ++ print " Offline:", ++ for node in remote_offlinenodes: ++ print node, ++ print "" ++ + # TODO: Remove, currently unused, we use status from the resource.py + def resources_status(argv): + info_dom = utils.getClusterState() +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index 78bc5ab..4f751b8 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -533,7 +533,8 @@ module ClusterEntity + @operations = [] + failed_ops = [] + message_list = [] +- cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op") { |e| ++ cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op | "\ ++ + "//lrm_resource[starts-with(@id, \"#{@id}:\")]/lrm_rsc_op") { |e| + operation = ResourceOperation.new(e) + @operations << operation + if operation.rc_code != 0 +@@ -819,13 +820,15 @@ module ClusterEntity + primitive_list = @member.members + end 
+ @masters, @slaves = get_masters_slaves(primitive_list) +- if @masters.empty? +- @error_list << { ++ if @masters.empty? and !disabled? ++ @status = ClusterEntity::ResourceStatus.new(:partially_running) ++ @warning_list << { + :message => 'Resource is master/slave but has not been promoted '\ + + 'to master on any node.', + :type => 'no_master' + } + end ++ @status = @member.status if @status < @member.status + end + end + +@@ -851,6 +854,22 @@ module ClusterEntity + end + end + ++ def update_status ++ if @member ++ @member.update_status ++ if @member.instance_of?(Primitive) ++ primitive_list = [@member] ++ else ++ primitive_list = @member.members ++ end ++ @masters, @slaves = get_masters_slaves(primitive_list) ++ if @masters.empty? and !disabled? ++ @status = ClusterEntity::ResourceStatus.new(:partially_running) ++ end ++ @status = @member.status if @status < @member.status ++ end ++ end ++ + private + def get_masters_slaves(primitive_list) + masters = [] +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index cc5b038..87404ac 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -568,6 +568,9 @@ def get_nodes_status() + if l.start_with?("Pacemaker Nodes:") + in_pacemaker = true + end ++ if l.start_with?("Pacemaker Remote Nodes:") ++ break ++ end + if l.end_with?(":") + next + end +-- +1.9.1 + diff --git a/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch b/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch new file mode 100644 index 0000000..56bac08 --- /dev/null +++ b/SOURCES/bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch @@ -0,0 +1,1223 @@ +From 9830bad113bf07fb65af18e2f2423c27da0180c0 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 8 Sep 2015 12:46:50 +0200 +Subject: [PATCH] web UI: multiple fixes in the dashboard + +- fix no quorum message +- fix status inconsistency of offline cluster +- fix status icons +- cluster status is 'failed' if there is resource with status 'blocked' +- fix random unselecting of current cluster +- performance improvements in loading cluster status +- removed icon that indicates issue in cluster +- changed status detection of resources +--- + pcsd/cluster_entity.rb | 150 +++++++++++++++-------- + pcsd/pcs.rb | 231 +++++++++++++++++------------------ + pcsd/public/js/nodes-ember.js | 122 +++++++++---------- + pcsd/public/js/pcsd.js | 24 +++- + pcsd/test/test_all_suite.rb | 1 + + pcsd/test/test_cluster_entity.rb | 126 +++++++++++++++---- + pcsd/test/test_pcs.rb | 257 +++++++++++++++++++++++++++++++++++++++ + pcsd/views/_cluster_list.erb | 6 +- + pcsd/views/main.erb | 2 +- + pcsd/views/manage.erb | 243 ++++++++++++++++++------------------ + 10 files changed, 779 insertions(+), 383 deletions(-) + create mode 100644 pcsd/test/test_pcs.rb + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index 4f751b8..b5d2719 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -3,6 +3,34 @@ require 'pcs.rb' + + module ClusterEntity + ++ def self.get_rsc_status(crm_dom) ++ unless crm_dom ++ return {} ++ end ++ status = {} ++ crm_dom.elements.each('/crm_mon/resources//resource') { |e| ++ rsc_id = e.attributes['id'].split(':')[0] ++ status[rsc_id] ||= [] ++ status[rsc_id] << ClusterEntity::CRMResourceStatus.new(e) ++ } ++ return status ++ end ++ ++ def self.get_resources_operations(cib_dom) ++ unless cib_dom ++ return {} ++ end ++ operations = {} ++ cib_dom.elements.each( ++ '/cib/status/node_state/lrm/lrm_resources/lrm_resource/lrm_rsc_op' ++ ) { |e| ++ rsc_id = e.parent.attributes['id'].split(':')[0] ++ 
operations[rsc_id] ||= [] ++ operations[rsc_id] << ClusterEntity::ResourceOperation.new(e) ++ } ++ return operations ++ end ++ + def self.obj_to_hash(obj, variables=nil) + unless variables + variables = obj.instance_variables +@@ -454,8 +482,9 @@ module ClusterEntity + attr_accessor :agentname, :_class, :provider, :type, :stonith, + :instance_attr, :crm_status, :operations + +- def initialize(primitive_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) +- super(primitive_cib_element, crm_dom, parent) ++ def initialize(primitive_cib_element=nil, rsc_status=nil, parent=nil, ++ operations=nil) ++ super(primitive_cib_element, nil, parent) + @class_type = 'primitive' + @agentname = nil + @_class = nil +@@ -482,18 +511,12 @@ module ClusterEntity + ) + } + @stonith = @_class == 'stonith' +- if @id and crm_dom +- crm_dom.elements.each("//resource[starts-with(@id, \"#{@id}:\")] | "\ +- + "//resource[@id=\"#{@id}\"]") { |e| +- @crm_status << CRMResourceStatus.new(e) +- } ++ if @id and rsc_status ++ @crm_status = rsc_status[@id] || [] + end + + @status = get_status +- +- if cib_dom +- load_operations(cib_dom) +- end ++ load_operations(operations) + end + end + +@@ -525,28 +548,26 @@ module ClusterEntity + return status + end + +- def load_operations(cib_dom) +- unless @id ++ def load_operations(operations) ++ @operations = [] ++ unless operations and @id and operations[@id] + return + end + +- @operations = [] + failed_ops = [] + message_list = [] +- cib_dom.elements.each("//lrm_resource[@id='#{@id}']/lrm_rsc_op | "\ +- + "//lrm_resource[starts-with(@id, \"#{@id}:\")]/lrm_rsc_op") { |e| +- operation = ResourceOperation.new(e) +- @operations << operation +- if operation.rc_code != 0 ++ operations[@id].each { |o| ++ @operations << o ++ if o.rc_code != 0 + # 7 == OCF_NOT_RUNNING == The resource is safely stopped. +- next if operation.operation == 'monitor' and operation.rc_code == 7 ++ next if o.operation == 'monitor' and o.rc_code == 7 + # 8 == OCF_RUNNING_MASTER == The resource is running in master mode. 
+- next if 8 == operation.rc_code +- failed_ops << operation +- message = "Failed to #{operation.operation} #{@id}" +- message += " on #{Time.at(operation.last_rc_change).asctime}" +- message += " on node #{operation.on_node}" if operation.on_node +- message += ": #{operation.exit_reason}" if operation.exit_reason ++ next if 8 == o.rc_code ++ failed_ops << o ++ message = "Failed to #{o.operation} #{@id}" ++ message += " on #{Time.at(o.last_rc_change).asctime}" ++ message += " on node #{o.on_node}" if o.on_node ++ message += ": #{o.exit_reason}" if o.exit_reason + message_list << { + :message => message + } +@@ -652,26 +673,48 @@ module ClusterEntity + class Group < Resource + attr_accessor :members + +- def initialize(group_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) +- super(group_cib_element, crm_dom, parent) ++ def initialize( ++ group_cib_element=nil, rsc_status=nil, parent=nil, operations=nil ++ ) ++ super(group_cib_element, nil, parent) + @class_type = 'group' + @members = [] + if group_cib_element and group_cib_element.name == 'group' + @status = ClusterEntity::ResourceStatus.new(:running) + group_cib_element.elements.each('primitive') { |e| +- p = Primitive.new(e, crm_dom, self, cib_dom) ++ p = Primitive.new(e, rsc_status, self, operations) + members << p +- @status = p.status if @status < p.status + } ++ update_status + end + end + + def update_status + @status = ClusterEntity::ResourceStatus.new(:running) ++ first = true + @members.each { |p| + p.update_status +- @status = p.status if @status < p.status ++ if first ++ first = false ++ next ++ end ++ if ( ++ p.status == ClusterEntity::ResourceStatus.new(:disabled) or ++ p.status == ClusterEntity::ResourceStatus.new(:blocked) or ++ p.status == ClusterEntity::ResourceStatus.new(:failed) ++ ) ++ @status = ClusterEntity::ResourceStatus.new(:partially_running) ++ end + } ++ if (@members and @members.length > 0 and ++ (ClusterEntity::ResourceStatus.new(:running) != @members[0].status and ++ ClusterEntity::ResourceStatus.new(:unknown) != @members[0].status) ++ ) ++ @status = @members[0].status ++ end ++ if disabled? 
++ @status = ClusterEntity::ResourceStatus.new(:disabled) ++ end + end + + def to_status(version='1') +@@ -713,8 +756,9 @@ module ClusterEntity + class MultiInstance < Resource + attr_accessor :member, :unique, :managed, :failed, :failure_ignored + +- def initialize(resource_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) +- super(resource_cib_element, crm_dom, parent) ++ def initialize(resource_cib_element=nil, crm_dom=nil, rsc_status=nil, ++ parent=nil, operations=nil) ++ super(resource_cib_element, nil, parent) + @member = nil + @multi_state = false + @unique = false +@@ -730,15 +774,13 @@ module ClusterEntity + ) + member = resource_cib_element.elements['group | primitive'] + if member and member.name == 'group' +- @member = Group.new(member, crm_dom, self, cib_dom) ++ @member = Group.new(member, rsc_status, self, operations) + elsif member and member.name == 'primitive' +- @member = Primitive.new(member, crm_dom, self, cib_dom) +- end +- if @member +- @status = @member.status ++ @member = Primitive.new(member, rsc_status, self, operations) + end ++ update_status + if crm_dom +- status = crm_dom.elements["//clone[@id='#{@id}']"] ++ status = crm_dom.elements["/crm_mon/resources//clone[@id='#{@id}']"] + if status + @unique = status.attributes['unique'] == 'true' + @managed = status.attributes['managed'] == 'true' +@@ -754,6 +796,9 @@ module ClusterEntity + @member.update_status + @status = @member.status + end ++ if disabled? ++ @status = ClusterEntity::ResourceStatus.new(:disabled) ++ end + end + + def to_status(version='1') +@@ -776,8 +821,11 @@ module ClusterEntity + + class Clone < MultiInstance + +- def initialize(resource_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) +- super(resource_cib_element, crm_dom, parent, cib_dom) ++ def initialize( ++ resource_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, ++ operations=nil ++ ) ++ super(resource_cib_element, crm_dom, rsc_status, parent, operations) + @class_type = 'clone' + end + +@@ -808,11 +856,12 @@ module ClusterEntity + class MasterSlave < MultiInstance + attr_accessor :masters, :slaves + +- def initialize(master_cib_element=nil, crm_dom=nil, parent=nil, cib_dom=nil) +- super(master_cib_element, crm_dom, parent, cib_dom) ++ def initialize(master_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, operations=nil) ++ super(master_cib_element, crm_dom, rsc_status, parent, operations) + @class_type = 'master' + @masters = [] + @slaves = [] ++ update_status + if @member + if @member.instance_of?(Primitive) + primitive_list = [@member] +@@ -820,15 +869,15 @@ module ClusterEntity + primitive_list = @member.members + end + @masters, @slaves = get_masters_slaves(primitive_list) +- if @masters.empty? and !disabled? +- @status = ClusterEntity::ResourceStatus.new(:partially_running) ++ if (@masters.empty? and ++ @status != ClusterEntity::ResourceStatus.new(:disabled) ++ ) + @warning_list << { + :message => 'Resource is master/slave but has not been promoted '\ + + 'to master on any node.', + :type => 'no_master' + } + end +- @status = @member.status if @status < @member.status + end + end + +@@ -857,16 +906,21 @@ module ClusterEntity + def update_status + if @member + @member.update_status ++ @status = @member.status + if @member.instance_of?(Primitive) + primitive_list = [@member] + else + primitive_list = @member.members + end + @masters, @slaves = get_masters_slaves(primitive_list) +- if @masters.empty? and !disabled? ++ if (@masters.empty? 
and ++ @member.status != ClusterEntity::ResourceStatus.new(:disabled) ++ ) + @status = ClusterEntity::ResourceStatus.new(:partially_running) + end +- @status = @member.status if @status < @member.status ++ end ++ if disabled? ++ @status = ClusterEntity::ResourceStatus.new(:disabled) + end + end + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 87404ac..9a0d145 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -15,14 +15,14 @@ require 'resource.rb' + require 'cluster_entity.rb' + require 'auth.rb' + +-def getAllSettings(session) +- stdout, stderr, retval = run_cmd(session, PCS, "property") +- stdout.map(&:chomp!) +- stdout.map(&:strip!) ++def getAllSettings(session, cib_dom=nil) ++ unless cib_dom ++ cib_dom = get_cib_dom(session) ++ end + stdout2, stderr2, retval2 = run_cmd(session, PENGINE, "metadata") + metadata = stdout2.join + ret = {} +- if retval == 0 and retval2 == 0 ++ if cib_dom and retval2 == 0 + doc = REXML::Document.new(metadata) + + default = "" +@@ -37,8 +37,9 @@ def getAllSettings(session) + ret[name] = {"value" => default, "type" => el_type} + } + +- stdout.each {|line| +- key,val = line.split(': ', 2) ++ cib_dom.elements.each('/cib/configuration/crm_config//nvpair') { |e| ++ key = e.attributes['name'] ++ val = e.attributes['value'] + key.gsub!(/-/,"_") + if ret.has_key?(key) + if ret[key]["type"] == "boolean" +@@ -723,106 +724,92 @@ def get_cluster_name() + end + end + +-def get_node_attributes(session) +- stdout, stderr, retval = run_cmd(session, PCS, "property", "list") +- if retval != 0 +- return {} +- end +- +- attrs = {} +- found = false +- stdout.each { |line| +- if not found +- if line.strip.start_with?("Node Attributes:") +- found = true +- end +- next +- end +- if not line.start_with?(" ") +- break +- end +- sline = line.split(":", 2) +- nodename = sline[0].strip +- attrs[nodename] = [] +- sline[1].strip.split(" ").each { |attr| +- key, val = attr.split("=", 2) +- attrs[nodename] << {:key => key, :value => val} ++def get_node_attributes(session, cib_dom=nil) ++ unless cib_dom ++ cib_dom = get_cib_dom(session) ++ return {} unless cib_dom ++ end ++ node_attrs = {} ++ cib_dom.elements.each( ++ '/cib/configuration/nodes/node/instance_attributes/nvpair' ++ ) { |e| ++ node = e.parent.parent.attributes['uname'] ++ node_attrs[node] ||= [] ++ node_attrs[node] << { ++ :id => e.attributes['id'], ++ :key => e.attributes['name'], ++ :value => e.attributes['value'] + } + } +- return attrs ++ node_attrs.each { |_, val| val.sort_by! { |obj| obj[:key] }} ++ return node_attrs + end + +-def get_fence_levels(session) +- stdout, stderr, retval = run_cmd(session, PCS, "stonith", "level") +- if retval != 0 or stdout == "" +- return {} ++def get_fence_levels(session, cib_dom=nil) ++ unless cib_dom ++ cib_dom = get_cib_dom(session) ++ return {} unless cib_dom + end + + fence_levels = {} +- node = "" +- stdout.each {|line| +- if line.start_with?(" Node: ") +- node = line.split(":",2)[1].strip +- next +- end +- fence_levels[node] ||= [] +- md = / Level (\S+) - (.*)$/.match(line) +- fence_levels[node] << {"level" => md[1], "devices" => md[2]} ++ cib_dom.elements.each( ++ '/cib/configuration/fencing-topology/fencing-level' ++ ) { |e| ++ target = e.attributes['target'] ++ fence_levels[target] ||= [] ++ fence_levels[target] << { ++ 'level' => e.attributes['index'], ++ 'devices' => e.attributes['devices'] ++ } + } ++ fence_levels.each { |_, val| val.sort_by! 
{ |obj| obj['level'].to_i }} + return fence_levels + end + +-def get_acls(session) +- stdout, stderr, retval = run_cmd(session, PCS, "acl", "show") +- if retval != 0 or stdout == "" +- return {} ++def get_acls(session, cib_dom=nil) ++ unless cib_dom ++ cib_dom = get_cib_dom(session) ++ return {} unless cib_dom + end + +- ret_val = {} +- state = nil +- user = "" +- role = "" +- +- stdout.each do |line| +- if m = /^User: (.*)$/.match(line) +- user = m[1] +- state = "user" +- ret_val[state] ||= {} +- ret_val[state][user] ||= [] +- next +- elsif m = /^Group: (.*)$/.match(line) +- user = m[1] +- state = "group" +- ret_val[state] ||= {} +- ret_val[state][user] ||= [] +- next +- elsif m = /^Role: (.*)$/.match(line) +- role = m[1] +- state = "role" +- ret_val[state] ||= {} +- ret_val[state][role] ||= {} +- next +- end ++ acls = { ++ 'role' => {}, ++ 'group' => {}, ++ 'user' => {}, ++ 'target' => {} ++ } + +- case state +- when "user", "group" +- m = /^ Roles: (.*)$/.match(line) +- ret_val[state][user] ||= [] +- m[1].scan(/\S+/).each {|urole| +- ret_val[state][user] << urole ++ cib_dom.elements.each('/cib/configuration/acls/*') { |e| ++ type = e.name[4..-1] ++ if e.name == 'acl_role' ++ role_id = e.attributes['id'] ++ desc = e.attributes['description'] ++ acls[type][role_id] = {} ++ acls[type][role_id]['description'] = desc ? desc : '' ++ acls[type][role_id]['permissions'] = [] ++ e.elements.each('acl_permission') { |p| ++ p_id = p.attributes['id'] ++ p_kind = p.attributes['kind'] ++ val = '' ++ if p.attributes['xpath'] ++ val = "xpath #{p.attributes['xpath']}" ++ elsif p.attributes['reference'] ++ val = "id #{p.attributes['reference']}" ++ else ++ next ++ end ++ acls[type][role_id]['permissions'] << "#{p_kind} #{val} (#{p_id})" ++ } ++ elsif ['acl_target', 'acl_group'].include?(e.name) ++ id = e.attributes['id'] ++ acls[type][id] = [] ++ e.elements.each('role') { |r| ++ acls[type][id] << r.attributes['id'] + } +- when "role" +- ret_val[state][role] ||= {} +- ret_val[state][role]["permissions"] ||= [] +- ret_val[state][role]["description"] ||= "" +- if m = /^ Description: (.*)$/.match(line) +- ret_val[state][role]["description"] = m[1] +- elsif m = /^ Permission: (.*)$/.match(line) +- ret_val[state][role]["permissions"] << m[1] +- end + end +- end +- return ret_val ++ } ++ acls['user'] = acls['target'] ++ return acls + end + + def enable_cluster(session) +@@ -1438,7 +1425,7 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) + {:version=>'2', :operations=>'1'}, + true, + nil, +- 6 ++ 15 + ) + node_map[node] = {} + node_map[node].update(overview) +@@ -1601,10 +1588,10 @@ def cluster_status_from_nodes(session, cluster_nodes, cluster_name) + } + if status[:status] != 'error' + status[:resource_list].each { |resource| +- if resource[:status] == 'failed' ++ if ['failed', 'blocked'].include?(resource[:status]) + status[:status] = 'error' + break +- elsif ['blocked', 'partially running'].include?(resource[:status]) ++ elsif ['partially running'].include?(resource[:status]) + status[:status] = 'warning' + end + } +@@ -1634,10 +1621,11 @@ def get_node_status(session, cib_dom) + :cluster_settings => {}, + :need_ring1_address => need_ring1_address?, + :is_cman_with_udpu_transport => is_cman_with_udpu_transport?, +- :acls => get_acls(session), ++ :acls => get_acls(session, cib_dom), + :username => session[:username], +- :fence_levels => get_fence_levels(session), +- :node_attr => node_attrs_to_v2(get_node_attributes(session)) ++ :fence_levels => get_fence_levels(session, cib_dom), ++ 
:node_attr => node_attrs_to_v2(get_node_attributes(session, cib_dom)), ++ :known_nodes => [] + } + + nodes = get_nodes_status() +@@ -1654,10 +1642,10 @@ def get_node_status(session, cib_dom) + + if cib_dom + node_status[:groups] = get_resource_groups(cib_dom) +- node_status[:constraints] = getAllConstraints(cib_dom.elements['//constraints']) ++ node_status[:constraints] = getAllConstraints(cib_dom.elements['/cib/configuration/constraints']) + end + +- cluster_settings = getAllSettings(session) ++ cluster_settings = getAllSettings(session, cib_dom) + if not cluster_settings.has_key?('error') + node_status[:cluster_settings] = cluster_settings + end +@@ -1670,7 +1658,7 @@ def get_resource_groups(cib_dom) + return [] + end + group_list = [] +- cib_dom.elements.each('cib/configuration/resources//group') do |e| ++ cib_dom.elements.each('/cib/configuration/resources//group') do |e| + group_list << e.attributes['id'] + end + return group_list +@@ -1682,49 +1670,54 @@ def get_resources(cib_dom, crm_dom=nil, get_operations=false) + end + + resource_list = [] +- cib = (get_operations) ? cib_dom : nil ++ operations = (get_operations) ? ClusterEntity::get_resources_operations(cib_dom) : nil ++ rsc_status = ClusterEntity::get_rsc_status(crm_dom) + +- cib_dom.elements.each('cib/configuration/resources/primitive') do |e| +- resource_list << ClusterEntity::Primitive.new(e, crm_dom, nil, cib) ++ cib_dom.elements.each('/cib/configuration/resources/primitive') do |e| ++ resource_list << ClusterEntity::Primitive.new(e, rsc_status, nil, operations) + end +- cib_dom.elements.each('cib/configuration/resources/group') do |e| +- resource_list << ClusterEntity::Group.new(e, crm_dom, nil, cib) ++ cib_dom.elements.each('/cib/configuration/resources/group') do |e| ++ resource_list << ClusterEntity::Group.new(e, rsc_status, nil, operations) + end +- cib_dom.elements.each('cib/configuration/resources/clone') do |e| +- resource_list << ClusterEntity::Clone.new(e, crm_dom, nil, cib) ++ cib_dom.elements.each('/cib/configuration/resources/clone') do |e| ++ resource_list << ClusterEntity::Clone.new( ++ e, crm_dom, rsc_status, nil, operations ++ ) + end +- cib_dom.elements.each('cib/configuration/resources/master') do |e| +- resource_list << ClusterEntity::MasterSlave.new(e, crm_dom, nil, cib) ++ cib_dom.elements.each('/cib/configuration/resources/master') do |e| ++ resource_list << ClusterEntity::MasterSlave.new( ++ e, crm_dom, rsc_status, nil, operations ++ ) + end + return resource_list + end + +-def get_resource_by_id(id, cib_dom, crm_dom=nil, get_operations=false) ++def get_resource_by_id(id, cib_dom, crm_dom=nil, rsc_status=nil, operations=false) + unless cib_dom + return nil + end + +- e = cib_dom.elements["cib/configuration/resources//*[@id='#{id}']"] ++ e = cib_dom.elements["/cib/configuration/resources//*[@id='#{id}']"] + unless e + return nil + end + + if e.parent.name != 'resources' # if resource is in group, clone or master/slave +- p = get_resource_by_id(e.parent.attributes['id'], cib_dom, crm_dom, get_operations) ++ p = get_resource_by_id( ++ e.parent.attributes['id'], cib_dom, crm_dom, rsc_status, operations ++ ) + return p.get_map[id.to_sym] + end + +- cib = (get_operations) ? 
cib_dom : nil +- + case e.name + when 'primitive' +- return ClusterEntity::Primitive.new(e, crm_dom, nil, cib) ++ return ClusterEntity::Primitive.new(e, rsc_status, nil, operations) + when 'group' +- return ClusterEntity::Group.new(e, crm_dom, nil, cib) ++ return ClusterEntity::Group.new(e, rsc_status, nil, operations) + when 'clone' +- return ClusterEntity::Clone.new(e, crm_dom, nil, cib) ++ return ClusterEntity::Clone.new(e, crm_dom, rsc_status, nil, operations) + when 'master' +- return ClusterEntity::MasterSlave.new(e, crm_dom, nil, cib) ++ return ClusterEntity::MasterSlave.new(e, crm_dom, rsc_status, nil, operations) + else + return nil + end +@@ -1762,7 +1755,7 @@ def node_attrs_to_v2(node_attrs) + all_nodes_attr[node] = [] + attrs.each { |attr| + all_nodes_attr[node] << { +- :id => nil, ++ :id => attr[:id], + :name => attr[:key], + :value => attr[:value] + } +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 5fec386..bbeed55 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -75,9 +75,9 @@ Pcs = Ember.Application.createWithMixins({ + timeout: 20000, + success: function(data) { + Pcs.clusterController.update(data); +- Ember.run.next(function() { +- correct_visibility_dashboard(Pcs.clusterController.cur_cluster); +- }); ++ if (Pcs.clusterController.get('cur_cluster')) { ++ Pcs.clusterController.update_cur_cluster(Pcs.clusterController.get('cur_cluster').get('name')); ++ } + if (data["not_current_data"]) { + self.update(); + } +@@ -595,30 +595,20 @@ Pcs.ResourceObj = Ember.Object.extend({ + }.property("class_type"), + res_type: Ember.computed.alias('resource_type'), + status_icon: function() { +- var icon_class; +- switch (this.get('status')) { +- case "running": +- icon_class = "check"; +- break; +- case "disabled": +- case "partially running": +- icon_class = "warning"; +- break; +- case "failed": +- case "blocked": +- icon_class = "error"; +- break; +- default: +- icon_class = "x"; +- } ++ var icon_class = get_status_icon_class(this.get("status_val")); + return "
"; + }.property("status_val"), + status_val: function() { +- if (this.get('warning_list').length) +- return get_status_value("warning"); ++ var status_val = get_status_value(this.get('status')); ++ if (this.get('warning_list').length && status_val != get_status_value('disabled')) ++ status_val = get_status_value("warning"); + if (this.get('error_list').length) +- return get_status_value("error"); +- return get_status_value(this.status); ++ status_val = get_status_value("error"); ++ if ((get_status_value(this.get('status')) - status_val) < 0) { ++ return get_status_value(this.get('status')); ++ } else { ++ return status_val; ++ } + }.property('status', 'error_list.@each.message', 'warning_list.@each.message'), + status_color: function() { + return get_status_color(this.get("status_val")); +@@ -996,12 +986,17 @@ Pcs.Clusternode = Ember.Object.extend({ + return this.get('status') == "unknown"; + }.property("status"), + status_val: function() { +- if (this.warnings && this.warnings.length) +- return get_status_value("warning"); +- if (this.errors && this.errors.length) +- return get_status_value("error"); +- return get_status_value(this.status); +- }.property("status"), ++ var status_val = get_status_value(this.get('status')); ++ if (this.get('warning_list').length) ++ status_val = get_status_value("warning"); ++ if (this.get('error_list').length) ++ status_val = get_status_value("error"); ++ if ((get_status_value(this.get('status')) - status_val) < 0) { ++ return get_status_value(this.get('status')); ++ } else { ++ return status_val; ++ } ++ }.property('status', 'error_list.@each.message', 'warning_list.@each.message'), + status_style: function() { + var color = get_status_color(this.get("status_val")); + return "color: " + color + ((color != "green")? "; font-weight: bold;" : ""); +@@ -1011,8 +1006,8 @@ Pcs.Clusternode = Ember.Object.extend({ + return ((this.get("status_val") == get_status_value("ok") || this.status == "standby") ? show + "default-hidden" : ""); + }.property("status_val"), + status_icon: function() { +- var icon_class = {"-1": "x", 1: "error", 2: "warning", 3: "x", 4: "check"}; +- return "
"; ++ var icon_class = get_status_icon_class(this.get("status_val")); ++ return "
"; + }.property("status_val"), + error_list: [], + warning_list: [], +@@ -1158,18 +1153,18 @@ Pcs.Cluster = Ember.Object.extend({ + return out; + }.property("error_list"), + status_icon: function() { +- var icon_class = {"-1": "x", 1: "error", 2: "warning", 3: "x", 4: "check"}; +- return "
"; ++ var icon_class = get_status_icon_class(get_status_value(this.get('status'))); ++ return "
"; + }.property("status"), + quorum_show: function() { +- if (this.status == "unknown") { ++ if (this.get('status') == "unknown") { + return "(quorate unknown)" +- } else if (!this.quorate) { ++ } else if (!this.get('quorate')) { + return "(doesn't have quorum)" + } else { + return "" + } +- }.property("status", "quorum"), ++ }.property("status", "quorate"), + nodes: [], + nodes_failed: 0, + resource_list: [], +@@ -1270,7 +1265,7 @@ Pcs.Cluster = Ember.Object.extend({ + + Pcs.clusterController = Ember.Object.create({ + cluster_list: Ember.ArrayController.create({ +- content: Ember.A(), sortProperties: ['status'], ++ content: Ember.A(), sortProperties: ['status', 'name'], + sortAscending: true, + sortFunction: function(a,b){return status_comparator(a,b);} + }), +@@ -1283,26 +1278,25 @@ Pcs.clusterController = Ember.Object.create({ + num_warning: 0, + num_unknown: 0, + +- update_cur_cluster: function(row) { ++ update_cur_cluster: function(cluster_name) { + var self = this; +- var cluster_name = $(row).attr("nodeID"); +- $("#clusters_list").find("div.arrow").hide(); +- $(row).find("div.arrow").show(); ++ $("#clusters_list div.arrow").hide(); ++ var selected_cluster = null; + + $.each(self.get('cluster_list').get('content'), function(key, cluster) { + if (cluster.get("name") == cluster_name) { +- self.set('cur_cluster', cluster); ++ selected_cluster = cluster; + return false; + } + }); +- correct_visibility_dashboard(self.get('cur_cluster')); + +- $("#node_sub_info").children().each(function (i, val) { +- if ($(val).attr("id") == ("cluster_info_" + cluster_name)) +- $(val).show(); +- else +- $(val).hide(); +- }); ++ self.set('cur_cluster', selected_cluster); ++ if (selected_cluster) { ++ Ember.run.next(function() { ++ $("#clusters_list tr[nodeID=" + cluster_name + "] div.arrow").show(); ++ correct_visibility_dashboard(self.get('cur_cluster')); ++ }); ++ } + }, + + update: function(data) { +@@ -1355,21 +1349,6 @@ Pcs.clusterController = Ember.Object.create({ + }); + } + +- switch (cluster.get('status')) { +- case "ok": +- self.incrementProperty('num_ok'); +- break; +- case "error": +- self.incrementProperty('num_error'); +- break; +- case "warning": +- self.incrementProperty('num_warning'); +- break; +- default: +- self.incrementProperty('num_unknown'); +- break; +- } +- + var nodes_to_auth = []; + $.each(cluster.get('warning_list'), function(key, val){ + if (val.hasOwnProperty("type") && val.type == "nodes_not_authorized"){ +@@ -1398,6 +1377,21 @@ Pcs.clusterController = Ember.Object.create({ + + cluster.set("status", "unknown"); + } ++ ++ switch (get_status_value(cluster.get('status'))) { ++ case get_status_value("ok"): ++ self.incrementProperty('num_ok'); ++ break; ++ case get_status_value("error"): ++ self.incrementProperty('num_error'); ++ break; ++ case get_status_value("warning"): ++ self.incrementProperty('num_warning'); ++ break; ++ default: ++ self.incrementProperty('num_unknown'); ++ break; ++ } + }); + + var to_remove = []; +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index e4830a9..cddf14e 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1850,10 +1850,10 @@ function get_status_value(status) { + standby: 2, + "partially running": 2, + disabled: 3, +- unknown: 3, +- ok: 4, +- running: 4, +- online: 4 ++ unknown: 4, ++ ok: 5, ++ running: 5, ++ online: 5 + }; + return ((values.hasOwnProperty(status)) ? 
values[status] : -1); + } +@@ -1866,11 +1866,25 @@ function status_comparator(a,b) { + return valA - valB; + } + ++function get_status_icon_class(status_val) { ++ switch (status_val) { ++ case get_status_value("error"): ++ return "error"; ++ case get_status_value("disabled"): ++ case get_status_value("warning"): ++ return "warning"; ++ case get_status_value("ok"): ++ return "check"; ++ default: ++ return "x"; ++ } ++} ++ + function get_status_color(status_val) { + if (status_val == get_status_value("ok")) { + return "green"; + } +- else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown")) { ++ else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown") || status_val == get_status_value('disabled')) { + return "orange"; + } + return "red"; +diff --git a/pcsd/views/_cluster_list.erb b/pcsd/views/_cluster_list.erb +index 9d719e0..90f084e 100644 +--- a/pcsd/views/_cluster_list.erb ++++ b/pcsd/views/_cluster_list.erb +@@ -22,7 +22,7 @@ + {{/if}} + + {{#each Pcs.clusterController.cluster_list }} +- ++ + + + +@@ -42,7 +42,7 @@ + {{else}} + {{nodes.length}} + {{#if nodes_failed}} +- |
{{nodes_failed}}
++ |
{{nodes_failed}}
+ {{/if}} + {{/if}} + +@@ -52,7 +52,7 @@ + {{else}} + {{resource_list.length}} + {{#if resources_failed}} +- |
{{resources_failed}}
++ |
{{resources_failed}}
+ {{/if}} + {{/if}} + +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index bb4e989..b24c74a 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -151,7 +151,7 @@ + + + +-
++ {{{resource.status_icon}}} + + {{{resource.show_status}}} + +diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb +index 79a8637..3620779 100644 +--- a/pcsd/views/manage.erb ++++ b/pcsd/views/manage.erb +@@ -42,131 +42,132 @@ +
INFORMATION ABOUT CLUSTERS
+ +
+-
Select a cluster to view more detailed cluster information
+- {{#each Pcs.clusterController.cluster_list}} +- ++ {{else}} ++
Select a cluster to view more detailed cluster information
++ {{/if}} +
+ + +-- +1.9.1 + diff --git a/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch b/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch new file mode 100644 index 0000000..3389bd3 --- /dev/null +++ b/SOURCES/bz1158569-01-fixed-a-typo-in-an-error-message.patch @@ -0,0 +1,25 @@ +From 1307ccbf977dd4ca797a82312631afae03530fbb Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 8 Sep 2015 09:19:10 +0200 +Subject: [PATCH] fixed a typo in an error message + +--- + pcsd/remote.rb | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 06947ec..8a71000 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -2127,7 +2127,7 @@ def fix_auth_of_cluster(params, request, session) + tokens_data, true + ) + if retval == 404 +- return [400, "Old version of PCS/PCSD is runnig on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."] ++ return [400, "Old version of PCS/PCSD is running on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."] + elsif retval != 200 + return [400, "Authentication failed."] + end +-- +1.9.1 + diff --git a/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch b/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch new file mode 100644 index 0000000..06f1040 --- /dev/null +++ b/SOURCES/bz1158569-02-fix-authentication-in-web-UI.patch @@ -0,0 +1,125 @@ +From 0b12b5e6212b42a3128d30dbce9371ac361dd865 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 15 Sep 2015 16:30:23 +0200 +Subject: [PATCH] fix authentication in web UI + +--- + pcsd/public/js/pcsd.js | 10 ++++---- + pcsd/remote.rb | 62 +++++++++++++++++++++++++++++++------------------- + 2 files changed, 45 insertions(+), 27 deletions(-) + +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 197cdd1..e4830a9 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -719,7 +719,7 @@ function auth_nodes(dialog) { + $("#auth_failed_error_msg").hide(); + $.ajax({ + type: 'POST', +- url: '/remote/auth_nodes', ++ url: '/remote/auth_gui_against_nodes', + data: dialog.find("#auth_nodes_form").serialize(), + timeout: pcs_timeout, + success: function (data) { +@@ -735,9 +735,11 @@ function auth_nodes(dialog) { + function auth_nodes_dialog_update(dialog_obj, data) { + var unauth_nodes = []; + var node; +- for (node in data) { +- if (data[node] != 0) { +- unauth_nodes.push(node); ++ if (data['node_auth_error']) { ++ for (node in data['node_auth_error']) { ++ if (data['node_auth_error'][node] != 0) { ++ unauth_nodes.push(node); ++ } + } + } + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 8a71000..e65c8ac 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -60,7 +60,7 @@ def remote(params, request, session) + :cluster_destroy => method(:cluster_destroy), + :get_wizard => method(:get_wizard), + :wizard_submit => method(:wizard_submit), +- :auth_nodes => method(:auth_nodes), ++ :auth_gui_against_nodes => method(:auth_gui_against_nodes), + :get_tokens => method(:get_tokens), + :get_cluster_tokens => method(:get_cluster_tokens), + :save_tokens => method(:save_tokens), +@@ -1994,32 +1994,48 @@ def wizard_submit(params, request, session) + + end + +-def auth_nodes(params, request, session) +- retval = {} +- params.each{|node| +- if node[0].end_with?"-pass" and node[0].length > 5 +- nodename = node[0][0..-6] +- if params.has_key?("all") +- pass = params["pass-all"] +- else +- pass = node[1] +- end +- result, 
sync_successful, _, _ = pcs_auth( +- session, [nodename], SUPERUSER, pass, true, true +- ) +- if not sync_successful +- retval[nodename] = 1 +- else +- node_status = result[nodename]['status'] +- if 'ok' == node_status or 'already_authorized' == node_status +- retval[nodename] = 0 ++def auth_gui_against_nodes(params, request, session) ++ node_auth_error = {} ++ new_tokens = {} ++ threads = [] ++ params.each { |node| ++ threads << Thread.new { ++ if node[0].end_with?("-pass") and node[0].length > 5 ++ nodename = node[0][0..-6] ++ if params.has_key?("all") ++ pass = params["pass-all"] + else +- retval[nodename] = 1 ++ pass = node[1] ++ end ++ data = { ++ 'node-0' => nodename, ++ 'username' => SUPERUSER, ++ 'password' => pass, ++ 'force' => 1, ++ } ++ node_auth_error[nodename] = 1 ++ code, response = send_request(session, nodename, 'auth', true, data) ++ if 200 == code ++ token = response.strip ++ if not token.empty? ++ new_tokens[nodename] = token ++ node_auth_error[nodename] = 0 ++ end + end + end +- end ++ } + } +- return [200, JSON.generate(retval)] ++ threads.each { |t| t.join } ++ ++ if not new_tokens.empty? ++ cluster_nodes = get_corosync_nodes() ++ tokens_cfg = Cfgsync::PcsdTokens.from_file('') ++ sync_successful, sync_responses = Cfgsync::save_sync_new_tokens( ++ tokens_cfg, new_tokens, cluster_nodes, $cluster_name ++ ) ++ end ++ ++ return [200, JSON.generate({'node_auth_error' => node_auth_error})] + end + + # not used anymore, left here for backward compatability reasons +-- +1.9.1 + diff --git a/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch b/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch new file mode 100644 index 0000000..0d9637f --- /dev/null +++ b/SOURCES/bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch @@ -0,0 +1,130 @@ +From 5c62afc314bfbff55e36c0f7f8e9aec0cc9246c4 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Wed, 2 Sep 2015 14:04:55 +0200 +Subject: [PATCH] web UI: mark unsaved permissions forms + +--- + pcsd/public/js/pcsd.js | 36 ++++++++++++++++++++++++++++++++++++ + pcsd/views/_permissions_cluster.erb | 5 ++++- + pcsd/views/permissions.erb | 8 +++++++- + 3 files changed, 47 insertions(+), 2 deletions(-) + +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 2c71e6b..879b533 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -2205,6 +2205,7 @@ function permissions_load_cluster(cluster_name, callback) { + $("#" + element_id + " :checkbox").each(function(key, checkbox) { + permissions_fix_dependent_checkboxes(checkbox); + }); ++ permissions_cluster_dirty_flag(cluster_name, false); + if (callback) { + callback(); + } +@@ -2259,8 +2260,27 @@ function permissions_save_cluster(form) { + }); + } + ++function permissions_cluster_dirty_flag(cluster_name, flag) { ++ var cluster_row = permissions_get_cluster_row(cluster_name); ++ if (cluster_row) { ++ var dirty_elem = cluster_row.find("span[class=unsaved_changes]"); ++ if (dirty_elem) { ++ if (flag) { ++ dirty_elem.show(); ++ } ++ else { ++ dirty_elem.hide(); ++ } ++ } ++ } ++} ++ + function permission_remove_row(button) { ++ var cluster_name = permissions_get_clustername( ++ $(button).parents("form").first() ++ ); + $(button).parent().parent().remove(); ++ permissions_cluster_dirty_flag(cluster_name, true); + } + + function permissions_add_row(template_row) { +@@ -2268,6 +2288,9 @@ function permissions_add_row(template_row) { + var user_type = permissions_get_row_type(template_row); + var max_key = -1; + var exists = false; ++ var cluster_name 
= permissions_get_clustername( ++ $(template_row).parents("form").first() ++ ); + + if("" == user_name) { + alert("Please enter the name"); +@@ -2326,6 +2349,8 @@ function permissions_add_row(template_row) { + template_inputs.removeAttr("checked").removeAttr("selected"); + template_inputs.removeAttr("disabled").removeAttr("readonly"); + $(template_row).find(":input[type=text]").val(""); ++ ++ permissions_cluster_dirty_flag(cluster_name, true); + } + + function permissions_get_dependent_checkboxes(checkbox) { +@@ -2400,3 +2425,14 @@ function permissions_get_checkbox_permission(checkbox) { + return ""; + } + ++function permissions_get_cluster_row(cluster_name) { ++ var cluster_row = null; ++ $('#cluster_list td[class=node_name]').each(function(index, elem) { ++ var jq_elem = $(elem); ++ if (jq_elem.text().trim() == cluster_name.trim()) { ++ cluster_row = jq_elem.parents("tr").first(); ++ } ++ }); ++ return cluster_row; ++} ++ +diff --git a/pcsd/views/_permissions_cluster.erb b/pcsd/views/_permissions_cluster.erb +index 232a5de..4048366 100644 +--- a/pcsd/views/_permissions_cluster.erb ++++ b/pcsd/views/_permissions_cluster.erb +@@ -58,7 +58,10 @@ + <% if user['allow'].include?(perm['code']) %> + checked="checked" + <% end %> +- onchange="permissions_fix_dependent_checkboxes(this);" ++ onchange=" ++ permissions_fix_dependent_checkboxes(this); ++ permissions_cluster_dirty_flag('<%= h(@cluster_name) %>', true); ++ " + > + + <% } %> +diff --git a/pcsd/views/permissions.erb b/pcsd/views/permissions.erb +index b02d9d3..1e38d7e 100644 +--- a/pcsd/views/permissions.erb ++++ b/pcsd/views/permissions.erb +@@ -16,7 +16,8 @@ + + + +- ++ ++ + + + <% @clusters.each do |c| %> +@@ -28,6 +29,11 @@ + ++ + +-- +1.9.1 + diff --git a/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch b/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch new file mode 100644 index 0000000..569ebd8 --- /dev/null +++ b/SOURCES/bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch @@ -0,0 +1,89 @@ +From 00ef3951514889791a11318124c271309d8b4958 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Fri, 4 Sep 2015 16:01:00 +0200 +Subject: [PATCH] check and refresh user auth info upon each request + +--- + pcs/cluster.py | 2 ++ + pcs/utils.py | 2 ++ + pcsd/auth.rb | 16 ++++++++++++---- + pcsd/test/test_auth.rb | 1 + + 4 files changed, 17 insertions(+), 4 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index d2a80a8..5a2128a 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -235,6 +235,8 @@ def auth_nodes_do(nodes, username, password, force, local): + 'local': local, + } + output, retval = utils.run_pcsdcli('auth', pcsd_data) ++ if retval == 0 and output['status'] == 'access_denied': ++ utils.err('Access denied') + if retval == 0 and output['status'] == 'ok' and output['data']: + failed = False + try: +diff --git a/pcs/utils.py b/pcs/utils.py +index c91b50e..757c159 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -803,6 +803,8 @@ def call_local_pcsd(argv, interactive_auth=False, std_in=None): + return [['Unable to communicate with pcsd'], 1, '', ''] + if output_json['status'] == 'bad_command': + return [['Command not allowed'], 1, '', ''] ++ if output_json['status'] == 'access_denied': ++ return [['Access denied'], 1, '', ''] + if output_json['status'] != "ok" or not output_json["data"]: + return [['Unable to communicate with pcsd'], 1, '', ''] + try: +diff --git a/pcsd/auth.rb b/pcsd/auth.rb +index 22d7868..53712ed 100644 +--- a/pcsd/auth.rb ++++ 
b/pcsd/auth.rb +@@ -19,7 +19,7 @@ class PCSAuth + + def self.validUser(username, password, generate_token = false) + $logger.info("Attempting login by '#{username}'") +- if not Rpam.auth(username,password, :service => "pcsd") ++ if not Rpam.auth(username, password, :service => "pcsd") + $logger.info("Failed login by '#{username}' (bad username or password)") + return nil + end +@@ -59,7 +59,7 @@ class PCSAuth + return [true, stdout.join(' ').split(nil)] + end + +- def self.isUserAllowedToLogin(username) ++ def self.isUserAllowedToLogin(username, log_success=true) + success, groups = getUsersGroups(username) + if not success + $logger.info( +@@ -73,7 +73,9 @@ class PCSAuth + ) + return false + end +- $logger.info("Successful login by '#{username}'") ++ if log_success ++ $logger.info("Successful login by '#{username}'") ++ end + return true + end + +@@ -131,7 +133,13 @@ class PCSAuth + end + + def self.isLoggedIn(session) +- return session[:username] != nil ++ username = session[:username] ++ if (username != nil) and isUserAllowedToLogin(username, false) ++ success, groups = getUsersGroups(username) ++ session[:usergroups] = success ? groups : [] ++ return true ++ end ++ return false + end + + def self.getSuperuserSession() +-- +1.9.1 + diff --git a/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch b/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch new file mode 100644 index 0000000..8815b8f --- /dev/null +++ b/SOURCES/bz1158571-03-fix-checking-user-s-group-membership.patch @@ -0,0 +1,26 @@ +From 25a4636078b869779cc6adfac3368a9fc382496d Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Mon, 7 Sep 2015 16:42:02 +0200 +Subject: [PATCH] fix checking user's group membership + +--- + pcsd/pcsd.rb | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index da47fb2..9a07ee8 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -120,8 +120,7 @@ $thread_cfgsync = Thread.new { + + helpers do + def protected! 
+- PCSAuth.loginByToken(session, cookies) if not PCSAuth.isLoggedIn(session) +- if not PCSAuth.isLoggedIn(session) ++ if not PCSAuth.loginByToken(session, cookies) and not PCSAuth.isLoggedIn(session) + # If we're on /managec//main we redirect + match_expr = "/managec/(.*)/(.*)" + mymatch = request.path.match(match_expr) +-- +1.9.1 + diff --git a/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch b/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch new file mode 100644 index 0000000..0ef49d8 --- /dev/null +++ b/SOURCES/bz1158577-01-improve-logging-in-pcsd.patch @@ -0,0 +1,24 @@ +From df10fbfd2673523f4cadac4be64cdf97ec9aba6c Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Wed, 12 Aug 2015 15:47:09 +0200 +Subject: [PATCH] improve logging in pcsd + +--- + pcsd/pcs.rb | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 6c7661a..1cddca8 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -359,6 +359,7 @@ def send_request_with_token(session, node, request, post=false, data={}, remote= + token = additional_tokens[node] || get_node_token(node) + $logger.info "SRWT Node: #{node} Request: #{request}" + if not token ++ $logger.error "Unable to connect to node #{node}, no token available" + return 400,'{"notoken":true}' + end + cookies_data = { +-- +1.9.1 + diff --git a/SOURCES/bz1158577-02-fix-certificates-syncing.patch b/SOURCES/bz1158577-02-fix-certificates-syncing.patch new file mode 100644 index 0000000..21faec1 --- /dev/null +++ b/SOURCES/bz1158577-02-fix-certificates-syncing.patch @@ -0,0 +1,554 @@ +From 8363f06e73bba0a1d3f7d18cf5b1cde5b5080141 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Thu, 27 Aug 2015 14:29:21 +0200 +Subject: [PATCH] fix certificates syncing + +--- + pcs/cluster.py | 16 +++--- + pcs/pcsd.py | 107 ++++++++++++++++++++++++++-------------- + pcs/utils.py | 29 +++++++++++ + pcsd/pcs.rb | 153 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- + pcsd/pcsd.rb | 12 ++++- + pcsd/remote.rb | 12 +++-- + pcsd/ssl.rb | 26 ++++++++-- + 7 files changed, 292 insertions(+), 63 deletions(-) + +diff --git a/pcs/cluster.py b/pcs/cluster.py +index c982ffe..d2a80a8 100644 +--- a/pcs/cluster.py ++++ b/pcs/cluster.py +@@ -345,13 +345,13 @@ def corosync_setup(argv,returnConfig=False): + sync_start(argv, primary_nodes) + if "--enable" in utils.pcs_options: + enable_cluster(primary_nodes) +- pcsd.pcsd_sync_certs([]) ++ pcsd.pcsd_sync_certs([], exit_after_error=False) + return + elif not returnConfig and not "--local" in utils.pcs_options:# and fedora_config: + sync(argv, primary_nodes) + if "--enable" in utils.pcs_options: + enable_cluster(primary_nodes) +- pcsd.pcsd_sync_certs([]) ++ pcsd.pcsd_sync_certs([], exit_after_error=False) + return + else: + nodes = argv[1:] +@@ -1190,15 +1190,17 @@ def cluster_node(argv): + + utils.setCorosyncConfig(node0, corosync_conf) + if "--enable" in utils.pcs_options: +- utils.enableCluster(node0) ++ retval, err = utils.enableCluster(node0) ++ if retval != 0: ++ print("Warning: enable cluster - {0}".format(err)) + if "--start" in utils.pcs_options or utils.is_rhel6(): + # always start new node on cman cluster + # otherwise it will get fenced +- utils.startCluster(node0) ++ retval, err = utils.startCluster(node0) ++ if retval != 0: ++ print("Warning: start cluster - {0}".format(err)) + +- pcsd_data = {'nodes': [node0]} +- utils.run_pcsdcli('send_local_certs', pcsd_data) +- utils.run_pcsdcli('pcsd_restart_nodes', pcsd_data) ++ pcsd.pcsd_sync_certs([node0], exit_after_error=False) + else: + utils.err("Unable to update 
any nodes") + output, retval = utils.reloadCorosync() +diff --git a/pcs/pcsd.py b/pcs/pcsd.py +index 6002c1a..b1b6be6 100644 +--- a/pcs/pcsd.py ++++ b/pcs/pcsd.py +@@ -36,14 +36,15 @@ def pcsd_certkey(argv): + try: + with open(certfile, 'r') as myfile: + cert = myfile.read() +- except IOError as e: +- utils.err(e) +- +- try: + with open(keyfile, 'r') as myfile: + key = myfile.read() + except IOError as e: + utils.err(e) ++ errors = utils.verify_cert_key_pair(cert, key) ++ if errors: ++ for err in errors: ++ utils.err(err, False) ++ sys.exit(1) + + if not "--force" in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)): + utils.err("certificate and/or key already exists, your must use --force to overwrite") +@@ -70,39 +71,71 @@ def pcsd_certkey(argv): + + print "Certificate and key updated, you may need to restart pcsd (service pcsd restart) for new settings to take effect" + +-def pcsd_sync_certs(argv): +- nodes = utils.getNodesFromCorosyncConf() +- pcsd_data = {'nodes': nodes} +- commands = [ +- { +- "command": "send_local_certs", +- "message": "Synchronizing pcsd certificates on nodes {0}.".format( +- ", ".join(nodes) +- ), +- }, +- { +- "command": "pcsd_restart_nodes", +- "message": "Restaring pcsd on the nodes in order to reload " +- + "the certificates." +- , +- }, +- ] +- for cmd in commands: +- error = '' +- print cmd["message"] +- output, retval = utils.run_pcsdcli(cmd["command"], pcsd_data) +- if retval == 0 and output['status'] == 'ok' and output['data']: +- try: +- if output['data']['status'] != 'ok' and output['data']['text']: +- error = output['data']['text'] +- except KeyError: +- error = 'Unable to communicate with pcsd' +- else: +- error = 'Unable to sync pcsd certificates' +- if error: +- # restart pcsd even if sync failed in order to reload +- # the certificates on nodes where it succeded +- utils.err(error, False) ++def pcsd_sync_certs(argv, exit_after_error=True): ++ error = False ++ nodes_sync = argv if argv else utils.getNodesFromCorosyncConf() ++ nodes_restart = [] ++ ++ print("Synchronizing pcsd certificates on nodes {0}...".format( ++ ", ".join(nodes_sync) ++ )) ++ pcsd_data = { ++ "nodes": nodes_sync, ++ } ++ output, retval = utils.run_pcsdcli("send_local_certs", pcsd_data) ++ if retval == 0 and output["status"] == "ok" and output["data"]: ++ try: ++ sync_result = output["data"] ++ if sync_result["node_status"]: ++ for node, status in sync_result["node_status"].items(): ++ print("{0}: {1}".format(node, status["text"])) ++ if status["status"] == "ok": ++ nodes_restart.append(node) ++ else: ++ error = True ++ if sync_result["status"] != "ok": ++ error = True ++ utils.err(sync_result["text"], False) ++ if error and not nodes_restart: ++ if exit_after_error: ++ sys.exit(1) ++ else: ++ return ++ print ++ except (KeyError, AttributeError): ++ utils.err("Unable to communicate with pcsd", exit_after_error) ++ return ++ else: ++ utils.err("Unable to sync pcsd certificates", exit_after_error) ++ return ++ ++ print("Restaring pcsd on the nodes in order to reload the certificates...") ++ pcsd_data = { ++ "nodes": nodes_restart, ++ } ++ output, retval = utils.run_pcsdcli("pcsd_restart_nodes", pcsd_data) ++ if retval == 0 and output["status"] == "ok" and output["data"]: ++ try: ++ restart_result = output["data"] ++ if restart_result["node_status"]: ++ for node, status in restart_result["node_status"].items(): ++ print("{0}: {1}".format(node, status["text"])) ++ if status["status"] != "ok": ++ error = True ++ if 
restart_result["status"] != "ok": ++ error = True ++ utils.err(restart_result["text"], False) ++ if error: ++ if exit_after_error: ++ sys.exit(1) ++ else: ++ return ++ except (KeyError, AttributeError): ++ utils.err("Unable to communicate with pcsd", exit_after_error) ++ return ++ else: ++ utils.err("Unable to restart pcsd", exit_after_error) ++ return + + def pcsd_clear_auth(argv): + output = [] +diff --git a/pcs/utils.py b/pcs/utils.py +index 761723b..c91b50e 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1880,6 +1880,35 @@ def is_iso8601_date(var): + output, retVal = run(["iso8601", "-d", var]) + return retVal == 0 + ++def verify_cert_key_pair(cert, key): ++ errors = [] ++ cert_modulus = "" ++ key_modulus = "" ++ ++ output, retval = run( ++ ["/usr/bin/openssl", "x509", "-modulus", "-noout"], ++ string_for_stdin=cert ++ ) ++ if retval != 0: ++ errors.append("Invalid certificate: {0}".format(output.strip())) ++ else: ++ cert_modulus = output.strip() ++ ++ output, retval = run( ++ ["/usr/bin/openssl", "rsa", "-modulus", "-noout"], ++ string_for_stdin=key ++ ) ++ if retval != 0: ++ errors.append("Invalid key: {0}".format(output.strip())) ++ else: ++ key_modulus = output.strip() ++ ++ if not errors and cert_modulus and key_modulus: ++ if cert_modulus != key_modulus: ++ errors.append("Certificate does not match the key") ++ ++ return errors ++ + # Does pacemaker consider a variable as true in cib? + # See crm_is_true in pacemaker/lib/common/utils.c + def is_cib_true(var): +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 1cddca8..37f6b83 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1215,29 +1215,84 @@ def send_local_configs_to_nodes( + end + + def send_local_certs_to_nodes(session, nodes) +- data = { +- 'ssl_cert' => File.read(CRT_FILE), +- 'ssl_key' => File.read(KEY_FILE), +- 'cookie_secret' => File.read(COOKIE_FILE), +- } ++ begin ++ data = { ++ 'ssl_cert' => File.read(CRT_FILE), ++ 'ssl_key' => File.read(KEY_FILE), ++ 'cookie_secret' => File.read(COOKIE_FILE), ++ } ++ rescue => e ++ return { ++ 'status' => 'error', ++ 'text' => "Unable to read certificates: #{e}", ++ 'node_status' => {}, ++ } ++ end ++ ++ crt_errors = verify_cert_key_pair(data['ssl_cert'], data['ssl_key']) ++ if crt_errors and not crt_errors.empty? ++ return { ++ 'status' => 'error', ++ 'text' => "Invalid certificate and/or key: #{crt_errors.join}", ++ 'node_status' => {}, ++ } ++ end ++ secret_errors = verify_cookie_secret(data['cookie_secret']) ++ if secret_errors and not secret_errors.empty? ++ return { ++ 'status' => 'error', ++ 'text' => "Invalid cookie secret: #{secret_errors.join}", ++ 'node_status' => {}, ++ } ++ end ++ + node_response = {} + threads = [] + nodes.each { |node| + threads << Thread.new { +- code, _ = send_request_with_token(session, node, '/set_certs', true, data) +- node_response[node] = 200 == code ? 
'ok' : 'error' ++ code, response = send_request_with_token( ++ session, node, '/set_certs', true, data ++ ) ++ node_response[node] = [code, response] + } + } + threads.each { |t| t.join } + + node_error = [] ++ node_status = {} + node_response.each { |node, response| +- node_error << node if response != 'ok' ++ if response[0] == 200 ++ node_status[node] = { ++ 'status' => 'ok', ++ 'text' => 'Success', ++ } ++ else ++ text = response[1] ++ if response[0] == 401 ++ text = "Unable to authenticate, try running 'pcs cluster auth'" ++ elsif response[0] == 400 ++ begin ++ parsed_response = JSON.parse(response[1], {:symbolize_names => true}) ++ if parsed_response[:noresponse] ++ text = "Unable to connect" ++ elsif parsed_response[:notoken] or parsed_response[:notauthorized] ++ text = "Unable to authenticate, try running 'pcs cluster auth'" ++ end ++ rescue JSON::ParserError ++ end ++ end ++ node_status[node] = { ++ 'status' => 'error', ++ 'text' => text ++ } ++ node_error << node ++ end + } + return { + 'status' => node_error.empty?() ? 'ok' : 'error', + 'text' => node_error.empty?() ? 'Success' : \ + "Unable to save pcsd certificates to nodes: #{node_error.join(', ')}", ++ 'node_status' => node_status, + } + end + +@@ -1246,20 +1301,49 @@ def pcsd_restart_nodes(session, nodes) + threads = [] + nodes.each { |node| + threads << Thread.new { +- code, _ = send_request_with_token(session, node, '/pcsd_restart', true) +- node_response[node] = 200 == code ? 'ok' : 'error' ++ code, response = send_request_with_token( ++ session, node, '/pcsd_restart', true ++ ) ++ node_response[node] = [code, response] + } + } + threads.each { |t| t.join } + + node_error = [] ++ node_status = {} + node_response.each { |node, response| +- node_error << node if response != 'ok' ++ if response[0] == 200 ++ node_status[node] = { ++ 'status' => 'ok', ++ 'text' => 'Success', ++ } ++ else ++ text = response[1] ++ if response[0] == 401 ++ text = "Unable to authenticate, try running 'pcs cluster auth'" ++ elsif response[0] == 400 ++ begin ++ parsed_response = JSON.parse(response[1], {:symbolize_names => true}) ++ if parsed_response[:noresponse] ++ text = "Unable to connect" ++ elsif parsed_response[:notoken] or parsed_response[:notauthorized] ++ text = "Unable to authenticate, try running 'pcs cluster auth'" ++ end ++ rescue JSON::ParserError ++ end ++ end ++ node_status[node] = { ++ 'status' => 'error', ++ 'text' => text ++ } ++ node_error << node ++ end + } + return { + 'status' => node_error.empty?() ? 'ok' : 'error', + 'text' => node_error.empty?() ? 'Success' : \ + "Unable to restart pcsd on nodes: #{node_error.join(', ')}", ++ 'node_status' => node_status, + } + end + +@@ -1280,6 +1364,53 @@ def write_file_lock(path, perm, data) + end + end + ++def verify_cert_key_pair(cert, key) ++ errors = [] ++ cert_modulus = nil ++ key_modulus = nil ++ ++ stdout, stderr, retval = run_cmd_options( ++ PCSAuth.getSuperuserSession(), ++ { ++ 'stdin' => cert, ++ }, ++ '/usr/bin/openssl', 'x509', '-modulus', '-noout' ++ ) ++ if retval != 0 ++ errors << "Invalid certificate: #{stderr.join}" ++ else ++ cert_modulus = stdout.join.strip ++ end ++ ++ stdout, stderr, retval = run_cmd_options( ++ PCSAuth.getSuperuserSession(), ++ { ++ 'stdin' => key, ++ }, ++ '/usr/bin/openssl', 'rsa', '-modulus', '-noout' ++ ) ++ if retval != 0 ++ errors << "Invalid key: #{stderr.join}" ++ else ++ key_modulus = stdout.join.strip ++ end ++ ++ if errors.empty? 
and cert_modulus and key_modulus ++ if cert_modulus != key_modulus ++ errors << 'Certificate does not match the key' ++ end ++ end ++ ++ return errors ++end ++ ++def verify_cookie_secret(secret) ++ if secret.empty? ++ return ['Cookie secret is empty'] ++ end ++ return [] ++end ++ + def cluster_status_from_nodes(session, cluster_nodes, cluster_name) + node_map = {} + forbidden_nodes = {} +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index 1f26fe5..da47fb2 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -25,10 +25,20 @@ Dir["wizards/*.rb"].each {|file| require file} + + use Rack::CommonLogger + ++def generate_cookie_secret ++ return SecureRandom.hex(30) ++end ++ + begin + secret = File.read(COOKIE_FILE) ++ secret_errors = verify_cookie_secret(secret) ++ if secret_errors and not secret_errors.empty? ++ secret_errors.each { |err| $logger.error err } ++ $logger.error "Invalid cookie secret, using temporary one" ++ secret = generate_cookie_secret() ++ end + rescue Errno::ENOENT +- secret = SecureRandom.hex(30) ++ secret = generate_cookie_secret() + File.open(COOKIE_FILE, 'w', 0700) {|f| f.write(secret)} + end + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 4655756..22af38a 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -584,15 +584,19 @@ def set_certs(params, request, session) + return [400, 'cannot save ssl key without ssl certificate'] + end + if !ssl_cert.empty? and !ssl_key.empty? ++ ssl_errors = verify_cert_key_pair(ssl_cert, ssl_key) ++ if ssl_errors and !ssl_errors.empty? ++ return [400, ssl_errors.join] ++ end + begin + write_file_lock(CRT_FILE, 0700, ssl_cert) + write_file_lock(KEY_FILE, 0700, ssl_key) +- rescue ++ rescue => e + # clean the files if we ended in the middle + # the files will be regenerated on next pcsd start + FileUtils.rm(CRT_FILE, {:force => true}) + FileUtils.rm(KEY_FILE, {:force => true}) +- return [400, 'cannot save ssl files'] ++ return [400, "cannot save ssl files: #{e}"] + end + end + +@@ -601,8 +605,8 @@ def set_certs(params, request, session) + if !cookie_secret.empty? + begin + write_file_lock(COOKIE_FILE, 0700, cookie_secret) +- rescue +- return [400, 'cannot save cookie secret'] ++ rescue => e ++ return [400, "cannot save cookie secret: #{e}"] + end + end + end +diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb +index 02372f6..e948aef 100644 +--- a/pcsd/ssl.rb ++++ b/pcsd/ssl.rb +@@ -5,10 +5,12 @@ require 'openssl' + require 'rack' + + require 'bootstrap.rb' ++require 'pcs.rb' + + server_name = WEBrick::Utils::getservername ++$logger = configure_logger('/var/log/pcsd/pcsd.log') + +-if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) ++def generate_cert_key_pair(server_name) + name = "/C=US/ST=MN/L=Minneapolis/O=pcsd/OU=pcsd/CN=#{server_name}" + ca = OpenSSL::X509::Name.parse(name) + key = OpenSSL::PKey::RSA.new(2048) +@@ -21,9 +23,27 @@ if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) + crt.not_before = Time.now + crt.not_after = Time.now + 10 * 365 * 24 * 60 * 60 # 10 year + crt.sign(key, OpenSSL::Digest::SHA256.new) ++ return crt, key ++end + ++if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE) ++ crt, key = generate_cert_key_pair(server_name) + File.open(CRT_FILE, 'w',0700) {|f| f.write(crt)} + File.open(KEY_FILE, 'w',0700) {|f| f.write(key)} ++else ++ crt, key = nil, nil ++ begin ++ crt = File.read(CRT_FILE) ++ key = File.read(KEY_FILE) ++ rescue => e ++ $logger.error "Unable to read certificate or key: #{e}" ++ end ++ crt_errors = verify_cert_key_pair(crt, key) ++ if crt_errors and not crt_errors.empty? 
++ crt_errors.each { |err| $logger.error err } ++ $logger.error "Invalid certificate and/or key, using temporary ones" ++ crt, key = generate_cert_key_pair(server_name) ++ end + end + + webrick_options = { +@@ -32,8 +52,8 @@ webrick_options = { + :Host => '::', + :SSLEnable => true, + :SSLVerifyClient => OpenSSL::SSL::VERIFY_NONE, +- :SSLCertificate => OpenSSL::X509::Certificate.new(File.open(CRT_FILE).read), +- :SSLPrivateKey => OpenSSL::PKey::RSA.new(File.open(KEY_FILE).read()), ++ :SSLCertificate => OpenSSL::X509::Certificate.new(crt), ++ :SSLPrivateKey => OpenSSL::PKey::RSA.new(key), + :SSLCertName => [[ "CN", server_name ]], + :SSLOptions => OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3, + } +-- +1.9.1 + diff --git a/SOURCES/bz1170150-Fix-displaying-globally-unique-clones-in-GUI.patch b/SOURCES/bz1170150-Fix-displaying-globally-unique-clones-in-GUI.patch deleted file mode 100644 index e1c23ee..0000000 --- a/SOURCES/bz1170150-Fix-displaying-globally-unique-clones-in-GUI.patch +++ /dev/null @@ -1,41 +0,0 @@ -From af67ec81f2276e58795da944c1352b62356cc051 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Mon, 1 Dec 2014 11:46:11 +0100 -Subject: [PATCH] Fix displaying globally-unique clones in GUI - ---- - pcsd/resource.rb | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/pcsd/resource.rb b/pcsd/resource.rb -index 387e791..1577e58 100644 ---- a/pcsd/resource.rb -+++ b/pcsd/resource.rb -@@ -359,19 +359,22 @@ class Resource - :options, :group, :clone, :stonith, :ms, :operations, - :instance_attr, :meta_attr, :clone_id, :ms_id - def initialize(e, group = nil, clone = false, ms = false) -- @id = e.attributes["id"] -+ # Strip ':' from resource name (for clones & master/slave) -+ @id = e.attributes["id"].sub(/(.*):.*/, '\1') - @agentname = e.attributes["resource_agent"] - @active = e.attributes["active"] == "true" ? true : false - @orphaned = e.attributes["orphaned"] == "true" ? true : false - @failed = e.attributes["failed"] == "true" ? true : false - @active = e.attributes["active"] == "true" ? true : false - @nodes = [] -- @group = group -+ # Strip ':' from group name (for clones & master/slave created from a group) -+ @group = group ? 
group.sub(/(.*):.*/, '\1') : group - @clone = clone - @ms = ms - @clone_id = nil - @ms_id = nil - @stonith = false -+ @options = {} - @instance_attr = {} - @meta_attr = {} - @operations = {} --- -1.9.1 - diff --git a/SOURCES/bz1179023-Added-support-for-resource-discovery-on-location-con-2.patch b/SOURCES/bz1179023-Added-support-for-resource-discovery-on-location-con-2.patch deleted file mode 100644 index 12200a8..0000000 --- a/SOURCES/bz1179023-Added-support-for-resource-discovery-on-location-con-2.patch +++ /dev/null @@ -1,149 +0,0 @@ -From 07cd5b51fe4d9b23effc6d9122b0c9e59c6ae030 Mon Sep 17 00:00:00 2001 -From: Chris Feist -Date: Wed, 7 Jan 2015 18:34:06 -0600 -Subject: [PATCH] Allow configuring resource-discovery on advanced location - constraints - ---- - pcs/constraint.py | 35 ++++++++++++++++++++++++++++++----- - pcs/pcs.8 | 2 +- - pcs/test/test_constraints.py | 28 ++++++++++++++++++++++++++++ - pcs/test/test_resource.py | 8 ++++---- - pcs/usage.py | 2 +- - 5 files changed, 64 insertions(+), 11 deletions(-) - -diff --git a/pcs/constraint.py b/pcs/constraint.py -index ff5b583..c7950d8 100644 ---- a/pcs/constraint.py -+++ b/pcs/constraint.py -@@ -788,6 +788,7 @@ def location_show(argv): - lc_score = rsc_loc.getAttribute("score") - lc_role = rsc_loc.getAttribute("role") - lc_name = "Resource: " + lc_rsc -+ lc_resource_discovery = rsc_loc.getAttribute("resource-discovery") - - for child in rsc_loc.childNodes: - if child.nodeType == child.ELEMENT_NODE and child.tagName == "rule": -@@ -815,14 +816,14 @@ def location_show(argv): - rschash = rschashoff - - if lc_node in nodeshash: -- nodeshash[lc_node].append((lc_id,lc_rsc,lc_score, lc_role)) -+ nodeshash[lc_node].append((lc_id,lc_rsc,lc_score, lc_role, lc_resource_discovery)) - else: -- nodeshash[lc_node] = [(lc_id, lc_rsc,lc_score, lc_role)] -+ nodeshash[lc_node] = [(lc_id, lc_rsc,lc_score, lc_role, lc_resource_discovery)] - - if lc_rsc in rschash: -- rschash[lc_rsc].append((lc_id,lc_node,lc_score, lc_role)) -+ rschash[lc_rsc].append((lc_id,lc_node,lc_score, lc_role, lc_resource_discovery)) - else: -- rschash[lc_rsc] = [(lc_id,lc_node,lc_score, lc_role)] -+ rschash[lc_rsc] = [(lc_id,lc_node,lc_score, lc_role, lc_resource_discovery)] - - nodelist = list(set(nodehashon.keys() + nodehashoff.keys())) - rsclist = list(set(rschashon.keys() + rschashoff.keys())) -@@ -840,6 +841,8 @@ def location_show(argv): - print " " + options[1] + " (" + options[0] + ")", - if (options[3] != ""): - print "(role: "+options[3]+")", -+ if (options[4] != ""): -+ print "(resource-discovery="+options[4]+")", - print "Score: "+ options[2] - - if (node in nodehashoff): -@@ -848,6 +851,8 @@ def location_show(argv): - print " " + options[1] + " (" + options[0] + ")", - if (options[3] != ""): - print "(role: "+options[3]+")", -+ if (options[4] != ""): -+ print "(resource-discovery="+options[4]+")", - print "Score: "+ options[2] - show_location_rules(ruleshash,showDetail) - else: -@@ -866,6 +871,8 @@ def location_show(argv): - print "(score:"+options[2]+")", - if (options[3] != ""): - print "(role: "+options[3]+")", -+ if (options[4] != ""): -+ print "(resource-discovery="+options[4]+")", - if showDetail: - print "(id:"+options[0]+")", - print -@@ -876,6 +883,8 @@ def location_show(argv): - print "(score:"+options[2]+")", - if (options[3] != ""): - print "(role: "+options[3]+")", -+ if (options[4] != ""): -+ print "(resource-discovery="+options[4]+")", - if showDetail: - print "(id:"+options[0]+")", - print -@@ -943,7 +952,7 @@ def location_prefer(argv): - - - def 
location_add(argv,rm=False): -- if len(argv) != 4 and (rm == False or len(argv) < 1): -+ if len(argv) < 4 and (rm == False or len(argv) < 1): - usage.constraint() - sys.exit(1) - -@@ -961,6 +970,20 @@ def location_add(argv,rm=False): - resource_name = argv.pop(0) - node = argv.pop(0) - score = argv.pop(0) -+ options = [] -+ # For now we only allow setting resource-discovery -+ if len(argv) > 0: -+ for arg in argv: -+ if '=' in arg: -+ options.append(arg.split('=',1)) -+ else: -+ print "Error: bad option '%s'" % arg -+ usage.constraint(["location add"]) -+ sys.exit(1) -+ if options[-1][0] != "resource-discovery" and "--force" not in utils.pcs_options: -+ utils.err("bad option '%s', use --force to override" % options[-1][0]) -+ -+ - resource_valid, resource_error, correct_id \ - = utils.validate_constraint_resource( - utils.get_cib_dom(), resource_name -@@ -996,6 +1019,8 @@ def location_add(argv,rm=False): - element.setAttribute("rsc",resource_name) - element.setAttribute("node",node) - element.setAttribute("score",score) -+ for option in options: -+ element.setAttribute(option[0], option[1]) - constraintsElement.appendChild(element) - - utils.replace_cib_configuration(dom) -diff --git a/pcs/pcs.8 b/pcs/pcs.8 -index dccc3bb..00ac11b 100644 ---- a/pcs/pcs.8 -+++ b/pcs/pcs.8 -@@ -393,7 +393,7 @@ where duration options and date spec options are: hours, monthdays, weekdays, ye - location show [resources|nodes [node id|resource id]...] [\fB\-\-full\fR] - List all the current location constraints, if 'resources' is specified location constraints are displayed per resource (default), if 'nodes' is specified location constraints are displayed per node. If specific nodes or resources are specified then we only show information about them. If \fB\-\-full\fR is specified show the internal constraint id's as well. - .TP --location add -+location add [resource-discovery= +
CLUSTER NAME CLUSTER NAME
+
+
++ ++ + <%= h(c.name) %> +
+-- +1.9.1 + diff --git a/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch b/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch new file mode 100644 index 0000000..7e34f88 --- /dev/null +++ b/SOURCES/bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch @@ -0,0 +1,735 @@ +From 85ea8bf4630bd3760ab935c24c7b78cdd255f55b Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Wed, 26 Aug 2015 10:55:57 +0200 +Subject: [PATCH] fix tree view of resources in web UI + +--- + pcsd/cluster_entity.rb | 15 +- + pcsd/pcs.rb | 30 ++- + pcsd/public/js/nodes-ember.js | 34 +++- + pcsd/remote.rb | 12 +- + pcsd/views/nodes.erb | 457 +++++++++++++++++++++--------------------- + 5 files changed, 284 insertions(+), 264 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index 182969f..b291937 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -895,7 +895,7 @@ module ClusterEntity + class Node < JSONable + attr_accessor :id, :error_list, :warning_list, :status, :quorum, :uptime, + :name, :corosync, :pacemaker, :cman, :corosync_enabled, +- :pacemaker_enabled, :pcsd_enabled, :attr, :fence_levels ++ :pacemaker_enabled, :pcsd_enabled + + def initialize + @id = nil +@@ -911,8 +911,6 @@ module ClusterEntity + @corosync_enabled = false + @pacemaker_enabled = false + @pcsd_enabled = false +- @attr = ClusterEntity::NvSet.new +- @fence_levels = {} + end + + def self.load_current_node(session, crm_dom=nil) +@@ -923,7 +921,6 @@ module ClusterEntity + node.pacemaker_enabled = pacemaker_enabled? + node.cman = cman_running? + node.pcsd_enabled = pcsd_enabled? +- node.fence_levels = get_fence_levels(session) + + node_online = (node.corosync and node.pacemaker) + node.status = node_online ? 'online' : 'offline' +@@ -939,16 +936,6 @@ module ClusterEntity + node.status = 'online' + end + node.quorum = !!crm_dom.elements['//current_dc[@with_quorum="true"]'] +- +- node_name = get_current_node_name() +- all_nodes_attr = get_node_attributes(session) +- if all_nodes_attr[node_name] +- all_nodes_attr[node_name].each { |pair| +- node.attr << ClusterEntity::NvPair.new( +- nil, pair[:key], pair[:value] +- ) +- } +- end + else + node.status = 'offline' + end +diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb +index 37f6b83..1fe9b99 100644 +--- a/pcsd/pcs.rb ++++ b/pcsd/pcs.rb +@@ -1624,8 +1624,11 @@ def get_node_status(session, cib_dom) + :need_ring1_address => need_ring1_address?, + :is_cman_with_udpu_transport => is_cman_with_udpu_transport?, + :acls => get_acls(session), +- :username => session[:username] ++ :username => session[:username], ++ :fence_levels => get_fence_levels(session), ++ :node_attr => node_attrs_to_v2(get_node_attributes(session)) + } ++ + nodes = get_nodes_status() + + known_nodes = [] +@@ -1742,14 +1745,31 @@ def get_cib_dom(session) + return nil + end + ++def node_attrs_to_v2(node_attrs) ++ all_nodes_attr = {} ++ node_attrs.each { |node, attrs| ++ all_nodes_attr[node] = [] ++ attrs.each { |attr| ++ all_nodes_attr[node] << { ++ :id => nil, ++ :name => attr[:key], ++ :value => attr[:value] ++ } ++ } ++ } ++ return all_nodes_attr ++end ++ + def status_v1_to_v2(status) + new_status = status.select { |k,_| + [:cluster_name, :username, :is_cman_with_udpu_transport, + :need_ring1_address, :cluster_settings, :constraints, :groups, + :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby, +- :pacemaker_offline, :acls ++ :pacemaker_offline, :acls, :fence_levels + ].include?(k) + } ++ new_status[:node_attr] = node_attrs_to_v2(status[:node_attr]) ++ 
+ resources = ClusterEntity::make_resources_tree( + ClusterEntity::get_primitives_from_status_v1(status[:resources]) + ) +@@ -1764,15 +1784,9 @@ def status_v1_to_v2(status) + ].include?(k) + } + +- node_attr = ClusterEntity::NvSet.new +- status[:node_attr].each { |k,v| +- node_attr << ClusterEntity::NvPair.new(nil, k, v) +- } + new_status[:node].update( + { + :id => status[:node_id], +- :attr => node_attr.to_status, +- :fence_levels => status[:fence_levels], + :quorum => nil, + :warning_list => [], + :error_list => [], +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 46e34fa..1f60adc 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -170,7 +170,7 @@ Pcs = Ember.Application.createWithMixins({ + tree_view_onclick(self.get('cur_resource').get('id'), true); + if (!fence_change && self.get('cur_fence')) + tree_view_select(self.get('cur_fence').get('id')); +- if (!resource_change && self.get('cur_fence')) ++ if (!resource_change && self.get('cur_resource')) + tree_view_select(self.get('cur_resource').get('id')); + Pcs.selectedNodeController.reset(); + setup_node_links(); +@@ -932,6 +932,9 @@ Pcs.Setting = Ember.Object.extend({ + Pcs.Clusternode = Ember.Object.extend({ + name: null, + status: null, ++ status_unknown: function() { ++ return this.get('status') == "unknown"; ++ }.property("status"), + status_val: function() { + if (this.warnings && this.warnings.length) + return get_status_value("warning"); +@@ -1013,6 +1016,10 @@ Pcs.Clusternode = Ember.Object.extend({ + return "color:red"; + } + }.property("up","pacemaker_standby"), ++ pacemaker_standby: null, ++ corosync_enabled: null, ++ pacemaker_enabled: null, ++ pcsd_enabled: null, + standby_style: function () { + if (this.pacemaker_standby) + return "display: none;"; +@@ -1043,7 +1050,12 @@ Pcs.Clusternode = Ember.Object.extend({ + else + return "Disabled"; + }.property("pcsd_enabled"), +- location_constraints: null ++ location_constraints: null, ++ node_attrs: [], ++ fence_levels: [], ++ pcsd: null, ++ corosync_daemon: null, ++ pacemaker_daemon: null, + }); + + Pcs.Aclrole = Ember.Object.extend({ +@@ -1509,8 +1521,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ + cur_node: null, + cur_node_attr: function () { + var nc = this; +- if (nc.cur_node && "node_attrs" in nc.cur_node) { +- return nc.cur_node.node_attrs; ++ if (nc.get('cur_node')) { ++ return nc.get('cur_node').get('node_attrs'); + } + return []; + }.property("cur_node", "content.@each.node_attrs"), +@@ -1599,7 +1611,7 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ + pacemaker_standby = false; + } + +- if (node_obj["noresponse"] == true) { ++ if (node_obj["status"] == 'unknown') { + pcsd_daemon = false + } else { + pcsd_daemon = true +@@ -1618,9 +1630,9 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ + up_status = false; + } + +- var node_attr = {}; +- if (node_obj["attr"]) { +- node_attr = node_obj["attr"]; ++ var node_attr = []; ++ if (data["node_attr"] && data["node_attr"][node_id]) { ++ node_attr = data["node_attr"][node_id]; + } + + found = false; +@@ -1646,7 +1658,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({ + node.set("uptime", node_obj["uptime"]); + node.set("node_id", node_obj["id"]); + node.set("node_attrs", node_attr); +- node.set("fence_levels", node_obj["fence_levels"]); ++ node.set("fence_levels", data["fence_levels"]); ++ node.set("status", node_obj["status"]); + } + }); + +@@ -1670,7 +1683,8 @@ Pcs.nodesController = 
Ember.ArrayController.createWithMixins({ + uptime: node_obj["uptime"], + node_id: node_obj["id"], + node_attrs: node_attr, +- fence_levels: node_obj["fence_levels"] ++ fence_levels: data["fence_levels"], ++ status: node_obj["status"] + }); + } + var pathname = window.location.pathname.split('/'); +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 22af38a..a40c1c7 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1014,8 +1014,14 @@ def node_status(params, request, session) + status[:cluster_settings] + + node_attr = {} +- node.attr.each { |v| +- node_attr[v.name.to_sym] = v.value ++ status[:node_attr].each { |node, attrs| ++ node_attr[node] = [] ++ attrs.each { |attr| ++ node_attr[node] << { ++ :key => attr[:name], ++ :value => attr[:value] ++ } ++ } + } + + old_status = { +@@ -1038,7 +1044,7 @@ def node_status(params, request, session) + :cluster_settings => cluster_settings, + :node_id => node.id, + :node_attr => node_attr, +- :fence_levels => node.fence_levels, ++ :fence_levels => status[:fence_levels], + :need_ring1_address => status[:need_ring1_address], + :is_cman_with_udpu_transport => status[:is_cman_with_udpu_transport], + :acls => status[:acls], +diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb +index b8ecf6d..19bba62 100644 +--- a/pcsd/views/nodes.erb ++++ b/pcsd/views/nodes.erb +@@ -40,242 +40,241 @@ +
+ + +-
+-
+-
+-
Edit Node 
+-
+-{{Pcs.nodesController.cur_node.name}} +-
+- +-
+- +-
+- +- +- +- +- +- +- +- +- +- +-
+- +-
+- +- {{#if Pcs.nodesController.cur_node.pacemaker}} +-
+- Pacemaker Connected +- {{else}} +- {{#if Pcs.nodesController.cur_node.pacemaker_standby}} +-
+- Pacemaker Standby +- {{else}} +-
+- Pacemaker Not Connected +- {{/if}} +- {{/if}} +-
+-
+- {{#if Pcs.nodesController.cur_node.corosync}} +-
+- Corosync Connected +- {{else}} +-
+- Corosync Not Connected +- {{/if}} +-
+-
+-
+- +-
+- +- +- +- +- +-
+- +-
+- +-
+- +- +- +- +- +- +-
Node ID:
{{Pcs.nodesController.cur_node.node_id}}
Uptime:
{{Pcs.nodesController.cur_node.uptime}}
+-
++
++
++
++
Edit Node 
++
++ {{Pcs.nodesController.cur_node.name}} ++
++
+ +- +- +- +- +-
Cluster Daemons
+-
+- +- +- +- +- +-
NAMESTATUS
pacemaker
+-{{#if Pcs.nodesController.cur_node.pacemaker_daemon}} +-Running ({{Pcs.nodesController.cur_node.pacemaker_startup}}) +-{{else}} +-{{#if Pcs.nodesController.cur_node.pcsd}} +-Stopped ({{Pcs.nodesController.cur_node.pacemaker_startup}}) +-{{else}} +-Unknown ({{Pcs.nodesController.cur_node.pacemaker_startup}}) +-{{/if}} +-{{/if}} +-
corosync
+-{{#if Pcs.nodesController.cur_node.corosync_daemon}} +-Running ({{Pcs.nodesController.cur_node.corosync_startup}}) +-{{else}} +-{{#if Pcs.nodesController.cur_node.pcsd}} +-Stopped ({{Pcs.nodesController.cur_node.corosync_startup}}) +-{{else}} +-Unknown ({{Pcs.nodesController.cur_node.corosync_startup}}) +-{{/if}} +-{{/if}} +-
pcsd
+-{{#if Pcs.nodesController.cur_node.pcsd}} +-Running ({{Pcs.nodesController.cur_node.pcsd_startup}}) +-{{else}} +- {{#if Pcs.nodesController.cur_node.authorized}} +- Stopped ({{Pcs.nodesController.cur_node.pcsd_startup}}) +- {{else}} +- Running (not Authorized) ({{Pcs.nodesController.cur_node.pcsd_startup}}) +- {{/if}} +-{{/if}} +-
+-
+-
+- +- +- +- +-
Running Resources
+-
+- +- +- {{#if Pcs.nodesController.cur_node.running_resources}} +- {{#each res in Pcs.nodesController.cur_node.running_resources}} +- +- {{/each}} +- {{else}} +- +- {{/if}} +-
NAME
+- {{#unless res.stonith}} +- {{#link-to 'Resources.index' res}}{{res.name}} ({{res.res_type}}){{/link-to}} +- {{/unless}} +-
NONE
+-
+-
+- +- +- +- +-
Resource Location Preferences
+-
+- +- +- {{#if Pcs.nodesController.cur_node.location_constraints}} +- {{#each Pcs.nodesController.cur_node.location_constraints}} +- +- {{/each}} +- {{else}} +- +- {{/if}} +-
NAMEScore
{{rsc}}{{score}}
NONE
+-
+-
+- +- +- +- +- +- +- +- ++ ++
Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})
+-
+- +- +- {{#each Pcs.nodesController.cur_node_attr}} +- +- +- +- +-
AttributeValueRemove
{{this.name}}{{this.value}} +- X ++
++ ++ ++ ++ ++ + +- {{/each}} +- {{#unless Pcs.nodesController.cur_node_attr}} +- +- {{/unless}} +- +- +- +- ++ ++ ++ + ++
++ ++
++ {{#if Pcs.nodesController.cur_node.pacemaker}} ++
++ Pacemaker Connected ++
++ {{else}} ++ {{#if Pcs.nodesController.cur_node.pacemaker_standby}} ++
++ Pacemaker Standby ++
++ {{else}} ++
++ Pacemaker Not Connected ++
++ {{/if}} ++ {{/if}} +
NONE
++ {{#if Pcs.nodesController.cur_node.corosync}} ++
++ Corosync Connected ++
++ {{else}} ++
++ Corosync Not Connected ++
++ {{/if}} ++
++
++ ++
++ ++ ++ ++ ++ ++
++ ++
+ ++
++ {{#unless Pcs.nodesController.cur_node.status_unknown}} ++ ++ ++ ++ ++ ++ +
Node ID:
{{Pcs.nodesController.cur_node.node_id}}
Uptime:
{{Pcs.nodesController.cur_node.uptime}}
++ {{/unless}} +
+-
+- +- +- +- +-
Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})
+-
+- +- +- {{#each Pcs.nodesController.cur_node_fence_levels}} +- +- +- +- +- +- {{/each}} +- {{#unless Pcs.nodesController.cur_node_fence_levels}} +- +- {{/unless}} +- +- +- +- +- +-
LevelFence DevicesRemove
{{this.level}}{{this.devices}} +- X +-
NONE
+-
+-
+-
++ ++ ++ ++
Cluster Daemons
++
++ ++ ++ ++ ++ ++
NAMESTATUS
pacemaker
++ {{#if Pcs.nodesController.cur_node.pacemaker_daemon}} ++ Running ({{Pcs.nodesController.cur_node.pacemaker_startup}}) ++ {{else}} ++ {{#if Pcs.nodesController.cur_node.pcsd}} ++ Stopped ({{Pcs.nodesController.cur_node.pacemaker_startup}}) ++ {{else}} ++ Unknown ({{Pcs.nodesController.cur_node.pacemaker_startup}}) ++ {{/if}} ++ {{/if}} ++
corosync
++ {{#if Pcs.nodesController.cur_node.corosync_daemon}} ++ Running ({{Pcs.nodesController.cur_node.corosync_startup}}) ++ {{else}} ++ {{#if Pcs.nodesController.cur_node.pcsd}} ++ Stopped ({{Pcs.nodesController.cur_node.corosync_startup}}) ++ {{else}} ++ Unknown ({{Pcs.nodesController.cur_node.corosync_startup}}) ++ {{/if}} ++ {{/if}} ++
pcsd
++ {{#if Pcs.nodesController.cur_node.pcsd}} ++ Running ({{Pcs.nodesController.cur_node.pcsd_startup}}) ++ {{else}} ++ {{#if Pcs.nodesController.cur_node.authorized}} ++ Stopped ({{Pcs.nodesController.cur_node.pcsd_startup}}) ++ {{else}} ++ Running (not Authorized) ({{Pcs.nodesController.cur_node.pcsd_startup}}) ++ {{/if}} ++ {{/if}} ++
++
++
++ ++ ++ ++ ++
Running Resources
++
++ ++ ++ {{#if Pcs.nodesController.cur_node.running_resources}} ++ {{#each res in Pcs.nodesController.cur_node.running_resources}} ++ ++ {{/each}} ++ {{else}} ++ ++ {{/if}} ++
NAME
++ {{#unless res.stonith}} ++ {{#link-to 'Resources.index' res}}{{res.name}} ({{res.res_type}}){{/link-to}} ++ {{/unless}} ++
NONE
++
++
++ ++ ++ ++ ++
Resource Location Preferences
++
++ ++ ++ {{#if Pcs.nodesController.cur_node.location_constraints}} ++ {{#each Pcs.nodesController.cur_node.location_constraints}} ++ ++ {{/each}} ++ {{else}} ++ ++ {{/if}} ++
NAMEScore
{{rsc}}{{score}}
NONE
++
++
++ ++ ++ ++ ++
Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})
++
++ ++ ++ {{#each attr in Pcs.nodesController.cur_node_attr}} ++ ++ ++ ++ ++ {{else}} ++ ++ {{/each}} ++ ++ ++ ++ ++ ++
AttributeValueRemove
{{attr.name}}{{attr.value}} ++ X ++
NONE
++
++
++ ++ ++ ++ ++
Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})
++
++ ++ ++ {{#each Pcs.nodesController.cur_node_fence_levels}} ++ ++ ++ ++ ++ ++ {{/each}} ++ {{#unless Pcs.nodesController.cur_node_fence_levels}} ++ ++ {{/unless}} ++ ++ ++ ++ ++ ++
LevelFence DevicesRemove
{{this.level}}{{this.devices}} ++ X ++
NONE
++
++
++
++
++ ++ + <%= erb :_configure %> + <%= erb :_acls %> + <%= erb :_wizards %> +-- +1.9.1 + diff --git a/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch b/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch new file mode 100644 index 0000000..9398777 --- /dev/null +++ b/SOURCES/bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch @@ -0,0 +1,173 @@ +From 032a2571656c646f17bb3453b6a7d4883241ad46 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 1 Sep 2015 12:06:20 +0200 +Subject: [PATCH] web UI: prevents running update multiple times at once + +--- + pcsd/public/js/nodes-ember.js | 106 ++++++++++++++++++++++++++++++++++++------ + 1 file changed, 91 insertions(+), 15 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 172c00a..d2f85bd 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -49,22 +49,25 @@ Pcs = Ember.Application.createWithMixins({ + }); + return retArray; + }, +- update_timeout: null, +- update: function(first_run) { ++ updater: null, ++ ++ update: function() { ++ Pcs.get('updater').update(); ++ }, ++ ++ _update: function(first_run) { + if (window.location.pathname.lastIndexOf('/manage', 0) !== 0) { + return; + } +- clearTimeout(Pcs.get('update_timeout')); +- Pcs.set('update_timeout', null); ++ if (first_run) { ++ show_loading_screen(); ++ } + var self = Pcs; + var cluster_name = self.cluster_name; + if (cluster_name == null) { + if (location.pathname.indexOf("/manage") != 0) { + return; + } +- if (first_run) { +- show_loading_screen(); +- } + Ember.debug("Empty Cluster Name"); + $.ajax({ + url: "/clusters_overview", +@@ -77,8 +80,6 @@ Pcs = Ember.Application.createWithMixins({ + }); + if (data["not_current_data"]) { + self.update(); +- } else { +- Pcs.set('update_timeout', window.setTimeout(self.update,20000)); + } + hide_loading_screen(); + }, +@@ -93,15 +94,14 @@ Pcs = Ember.Application.createWithMixins({ + console.log("Error: Unable to parse json for clusters_overview"); + } + } +- Pcs.set('update_timeout', window.setTimeout(self.update,20000)); + hide_loading_screen(); ++ }, ++ complete: function() { ++ Pcs.get('updater').update_finished(); + } + }); + return; + } +- if (first_run) { +- show_loading_screen(); +- } + $.ajax({ + url: "cluster_status", + dataType: "json", +@@ -191,12 +191,84 @@ Pcs = Ember.Application.createWithMixins({ + }, + complete: function() { + hide_loading_screen(); +- Pcs.update_timeout = window.setTimeout(Pcs.update,20000); ++ Pcs.get('updater').update_finished(); + } + }); + } + }); + ++Pcs.Updater = Ember.Object.extend({ ++ timeout: 20000, ++ first_run: true, ++ async: true, ++ autostart: true, ++ started: false, ++ in_progress: false, ++ waiting: false, ++ update_function: null, ++ update_target: null, ++ timer: null, ++ ++ start: function() { ++ this.set('started', true); ++ this.update(); ++ }, ++ ++ stop: function() { ++ this.set('started', false); ++ this.cancel_timer(); ++ }, ++ ++ cancel_timer: function() { ++ var self = this; ++ var timer = self.get('timer'); ++ if (timer) { ++ self.set('timer', null); ++ Ember.run.cancel(timer); ++ } ++ }, ++ ++ update: function() { ++ var self = this; ++ if (!self.get('update_function')) { ++ console.log('No update_function defined!'); ++ return; ++ } ++ self.cancel_timer(); ++ self.set('waiting', false); ++ if (self.get('in_progress')) { ++ self.set('waiting', true); ++ } else { ++ self.set('in_progress', true); ++ 
self.get('update_function').apply(self.get('update_target'), [self.get('first_run')]); ++ self.set('first_run', false); ++ if (!self.get('async')) { ++ self.update_finished(); ++ } ++ } ++ }, ++ ++ update_finished: function() { ++ var self = this; ++ if (self.get('waiting')) { ++ Ember.run.next(self, self.update); ++ } else if (self.get('started')) { ++ self.set('timer', Ember.run.later(self, self.update, self.get('timeout'))); ++ } ++ self.set('in_progress', false); ++ }, ++ ++ init: function() { ++ var self = this; ++ if (!self.get('update_target')) { ++ self.set('update_target', self); ++ } ++ if (self.get('autostart')) { ++ self.start(); ++ } ++ } ++}); ++ + Pcs.resourcesContainer = Ember.Object.create({ + resource_map: {}, + top_level_resource_map: {}, +@@ -1742,4 +1814,8 @@ function myUpdate() { + // window.setTimeout(myUpdate,4000); + } + +-Pcs.update(true); ++Pcs.set('updater', Pcs.Updater.create({ ++ timeout: 20000, ++ update_function: Pcs._update, ++ update_target: Pcs ++})); +-- +1.9.1 + diff --git a/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch b/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch new file mode 100644 index 0000000..34ec1a9 --- /dev/null +++ b/SOURCES/bz1189857-04-fix-constraints-removing-in-web-UI.patch @@ -0,0 +1,32 @@ +From 7e92db5789ad09f0e1184691ba69fb087402f24c Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Wed, 2 Sep 2015 11:16:14 +0200 +Subject: [PATCH] fix constraints removing in web UI + +--- + pcsd/public/js/nodes-ember.js | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index d2f85bd..0943c65 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -562,9 +562,12 @@ Pcs.resourcesContainer = Ember.Object.create({ + self.set('constraints', constraints); + var resource_map = self.get('resource_map'); + $.each(constraints, function(const_type, cons) { +- $.each(cons, function(resource_id, cons_list) { +- if (resource_id in resource_map) +- resource_map[resource_id].set(const_type, cons_list); ++ $.each(resource_map, function(resource_id, resource_obj) { ++ if (resource_id in cons) { ++ resource_obj.set(const_type, cons[resource_id]); ++ } else { ++ resource_obj.set(const_type, []); ++ } + }); + }); + } +-- +1.9.1 + diff --git a/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch b/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch new file mode 100644 index 0000000..d702fe2 --- /dev/null +++ b/SOURCES/bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch @@ -0,0 +1,73 @@ +From 41e2d3e4f5ae0331d7984612485b3bbb84d41304 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Wed, 2 Sep 2015 12:39:06 +0200 +Subject: [PATCH] remove removing constriants from client-side (javascript) + +All changes are displayed after update. 
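For readers following the JavaScript above: the new Pcs.Updater object exists to keep at most one status refresh in flight and to coalesce refresh requests that arrive while one is still running, instead of chaining bare setTimeout calls. A rough Python/asyncio sketch of that in_progress/waiting logic (an analogy only; the real object is Ember.js and additionally handles first_run, autostart and timer cancellation):

```python
# Analogy for the Pcs.Updater scheduling idea, written with asyncio so
# the control flow is easy to follow; this is not pcsd code.
import asyncio


class Updater:
    """Run periodic refreshes, never two at once; a request that arrives
    while a refresh is running is remembered and served right after it."""

    def __init__(self, refresh, timeout=20.0):
        self._refresh = refresh          # async callable doing one refresh
        self._timeout = timeout
        self._in_progress = False
        self._waiting = False

    async def update(self):
        if self._in_progress:
            self._waiting = True         # coalesce into a single re-run
            return
        self._in_progress = True
        try:
            await self._refresh()
        finally:
            self._in_progress = False
        if self._waiting:
            self._waiting = False
            await self.update()

    async def run_forever(self):
        while True:
            await self.update()
            await asyncio.sleep(self._timeout)


async def demo():
    async def refresh():
        print("refreshing cluster status")
        await asyncio.sleep(0.1)

    updater = Updater(refresh, timeout=0.5)
    asyncio.ensure_future(updater.run_forever())
    await updater.update()               # concurrent request gets serialized
    await asyncio.sleep(1.2)

# asyncio.get_event_loop().run_until_complete(demo())
```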
+--- + pcsd/public/js/nodes-ember.js | 24 ------------------------ + pcsd/public/js/pcsd.js | 6 ------ + 2 files changed, 30 deletions(-) + +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index 0943c65..5fec386 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -390,30 +390,6 @@ Pcs.resourcesContainer = Ember.Object.create({ + }; + }, + +- remove_constraint: function(constraint_id) { +- $.each(this.get('resource_map'), function(key, resource) { +- $.each( +- [ +- "location_constraints", +- "ordering_constraints", +- "ordering_set_constraints", +- "colocation_constraints" +- ], +- function(_, constraint_type) { +- if (resource.get(constraint_type)) { +- resource.set( +- constraint_type, +- $.grep( +- resource.get(constraint_type), +- function(value2, key) { return value2.id != constraint_id; } +- ) +- ); +- } +- } +- ); +- }); +- }, +- + update_meta_attr: function(resource_id, attr, value) { + value = typeof value !== 'undefined' ? value.trim() : ""; + var data = { +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 879b533..197cdd1 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -1595,9 +1595,6 @@ function remove_constraint(id) { + url: get_cluster_remote_url() + 'remove_constraint_remote', + data: {"constraint_id": id}, + timeout: pcs_timeout, +- success: function (data) { +- Pcs.resourcesContainer.remove_constraint(id); +- }, + error: function (xhr, status, error) { + alert( + "Error removing constraint " +@@ -1617,9 +1614,6 @@ function remove_constraint_rule(id) { + url: get_cluster_remote_url() + 'remove_constraint_rule_remote', + data: {"rule_id": id}, + timeout: pcs_timeout, +- success: function (data) { +- Pcs.resourcesContainer.remove_constraint(id); +- }, + error: function (xhr, status, error) { + alert( + "Error removing constraint rule " +-- +1.9.1 + diff --git a/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch b/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch new file mode 100644 index 0000000..eb0098a --- /dev/null +++ b/SOURCES/bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch @@ -0,0 +1,228 @@ +From 7c12321d187ce5919ea5e443612321b404be8cab Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 15 Sep 2015 11:03:59 +0200 +Subject: [PATCH] web UI: fixes in nodes, resources, fence devices + +- fix creating disabled resource +- add sorting for cluster list, resource list and fence device list +- hide resource (fence device) details when there is no resource (fence device) +- in resource list color of resource name depends on its status +- fix group selector +- disabled autocorrect for ordering set constraints +- fix status detection of master/slave resources +--- + pcsd/cluster_entity.rb | 2 +- + pcsd/pcsd.rb | 2 +- + pcsd/public/css/style.css | 8 +++++++ + pcsd/public/js/nodes-ember.js | 56 +++++++++++++++++++++++++++++++------------ + pcsd/public/js/pcsd.js | 5 +++- + pcsd/views/main.erb | 4 +++- + 6 files changed, 58 insertions(+), 19 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index b5d2719..8f29a40 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -914,7 +914,7 @@ module ClusterEntity + end + @masters, @slaves = get_masters_slaves(primitive_list) + if (@masters.empty? 
and +- @member.status != ClusterEntity::ResourceStatus.new(:disabled) ++ @member.status == ClusterEntity::ResourceStatus.new(:running) + ) + @status = ClusterEntity::ResourceStatus.new(:partially_running) + end +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index 9a07ee8..b7c2a49 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -178,7 +178,7 @@ helpers do + param_line << "#{myparam}=#{val}" + end + if param == "disabled" +- meta_options << "meta target-role=Stopped" ++ meta_options << 'meta' << 'target-role=Stopped' + end + } + return param_line + meta_options +diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css +index a3f6638..1c003bd 100644 +--- a/pcsd/public/css/style.css ++++ b/pcsd/public/css/style.css +@@ -778,3 +778,11 @@ li.menuheader { + .issue_table { + margin-top: 1.5em; + } ++ ++.status-error { ++ color: red; ++} ++ ++.status-warning { ++ color: #ff6600; ++} +diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js +index bbeed55..1e00a94 100644 +--- a/pcsd/public/js/nodes-ember.js ++++ b/pcsd/public/js/nodes-ember.js +@@ -147,8 +147,10 @@ Pcs = Ember.Application.createWithMixins({ + } else { + if (self.get('fence_list').length > 0) { + cur_fence = self.get('fence_list')[0]; +- fence_change = true; ++ } else { ++ cur_fence = null; + } ++ fence_change = true; + } + + if (cur_resource && cur_resource.get('id') in resource_map) { +@@ -158,22 +160,28 @@ Pcs = Ember.Application.createWithMixins({ + } else { + if (self.get('resource_list').length > 0) { + cur_resource = self.get('resource_list')[0]; +- resource_change = true; ++ } else { ++ cur_resource = null; + } ++ resource_change = true; + } + + self.set('cur_fence', cur_fence); + self.set('cur_resource', cur_resource); + + Ember.run.scheduleOnce('afterRender', Pcs, function () { +- if (fence_change) +- tree_view_onclick(self.get('cur_fence').get('id'), true); +- if (resource_change) +- tree_view_onclick(self.get('cur_resource').get('id'), true); +- if (!fence_change && self.get('cur_fence')) +- tree_view_select(self.get('cur_fence').get('id')); +- if (!resource_change && self.get('cur_resource')) +- tree_view_select(self.get('cur_resource').get('id')); ++ if (self.get('cur_fence')) { ++ if (fence_change) ++ tree_view_onclick(self.get('cur_fence').get('id'), true); ++ else ++ tree_view_select(self.get('cur_fence').get('id')); ++ } ++ if (self.get('cur_resource')) { ++ if (resource_change) ++ tree_view_onclick(self.get('cur_resource').get('id'), true); ++ else ++ tree_view_select(self.get('cur_resource').get('id')); ++ } + Pcs.selectedNodeController.reset(); + disable_checkbox_clicks(); + }); +@@ -546,6 +554,11 @@ Pcs.resourcesContainer = Ember.Object.create({ + } + }); + }); ++ $.each(resource_map, function(resource_id, resource_obj) { ++ resource_obj.set('group_list', self.get('group_list')); ++ }); ++ self.set('resource_list', Ember.copy(self.get('resource_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))})); ++ self.set('fence_list', Ember.copy(self.get('fence_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))})); + } + }); + +@@ -565,6 +578,7 @@ Pcs.ResourceObj = Ember.Object.extend({ + disabled: false, + error_list: [], + warning_list: [], ++ group_list: [], + get_group_id: function() { + var self = this; + var p = self.get('parent'); +@@ -577,7 +591,7 @@ Pcs.ResourceObj = Ember.Object.extend({ + var self = this; + var cur_group = self.get('get_group_id'); + var html = ' + + {{{node.status_icon}}} +- {{node._id}} ++ ++ {{node._id}} ++ + 
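One detail of the pcsd.rb hunk above worth spelling out: because pcsd runs pcs without a shell, every command-line token has to be its own array element, so the single string "meta target-role=Stopped" (which pcs would receive as one argument) is split into 'meta' and 'target-role=Stopped'. A small Python illustration of the same rule (a hypothetical helper, not pcsd code):

```python
# Hypothetical helper illustrating why "meta target-role=Stopped" must be
# two argv elements: with no shell involved, every list element reaches
# the child process as exactly one argument, spaces included.
def build_create_args(resource_id, agent, disabled=False):
    args = ["pcs", "resource", "create", resource_id, agent]
    if disabled:
        args += ["meta", "target-role=Stopped"]   # two tokens, not one string
    return args


if __name__ == "__main__":
    # The resulting list could be handed to e.g. subprocess.call().
    print(build_create_args("dummy1", "ocf:heartbeat:Dummy", disabled=True))
```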
{{node.resource_type}} + + +-- +1.9.1 + diff --git a/SOURCES/bz1189857-07-web-UI-fixes.patch b/SOURCES/bz1189857-07-web-UI-fixes.patch new file mode 100644 index 0000000..9df183e --- /dev/null +++ b/SOURCES/bz1189857-07-web-UI-fixes.patch @@ -0,0 +1,99 @@ +From c601e0f7e93db3e136eb9080fc2d4f4a0c999360 Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Mon, 21 Sep 2015 17:53:51 +0200 +Subject: [PATCH] web UI fixes + +- fix loading resource optional argument form +- fix master/slave resource status from old pcsd +- fix status of failed resource +--- + pcsd/cluster_entity.rb | 10 ++++++---- + pcsd/public/js/pcsd.js | 4 +++- + pcsd/views/main.erb | 2 ++ + 3 files changed, 11 insertions(+), 5 deletions(-) + +diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb +index 8f29a40..c746544 100644 +--- a/pcsd/cluster_entity.rb ++++ b/pcsd/cluster_entity.rb +@@ -191,6 +191,7 @@ module ClusterEntity + mi = ClusterEntity::Clone.new + else + mi = ClusterEntity::MasterSlave.new ++ mi.masters_unknown = true + end + mi.id = mi_id + mi.meta_attr = ClusterEntity::get_meta_attr_from_status_v1( +@@ -539,7 +540,7 @@ module ClusterEntity + status = ClusterEntity::ResourceStatus.new(:disabled) + elsif running > 0 + status = ClusterEntity::ResourceStatus.new(:running) +- elsif failed > 0 ++ elsif failed > 0 or @error_list.length > 0 + status = ClusterEntity::ResourceStatus.new(:failed) + else + status = ClusterEntity::ResourceStatus.new(:blocked) +@@ -854,10 +855,11 @@ module ClusterEntity + + + class MasterSlave < MultiInstance +- attr_accessor :masters, :slaves ++ attr_accessor :masters, :slaves, :masters_unknown + + def initialize(master_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, operations=nil) + super(master_cib_element, crm_dom, rsc_status, parent, operations) ++ @masters_unknown = false + @class_type = 'master' + @masters = [] + @slaves = [] +@@ -869,7 +871,7 @@ module ClusterEntity + primitive_list = @member.members + end + @masters, @slaves = get_masters_slaves(primitive_list) +- if (@masters.empty? and ++ if (@masters.empty? and !@masters_unknown and + @status != ClusterEntity::ResourceStatus.new(:disabled) + ) + @warning_list << { +@@ -913,7 +915,7 @@ module ClusterEntity + primitive_list = @member.members + end + @masters, @slaves = get_masters_slaves(primitive_list) +- if (@masters.empty? and ++ if (@masters.empty? and !@masters_unknown and + @member.status == ClusterEntity::ResourceStatus.new(:running) + ) + @status = ClusterEntity::ResourceStatus.new(:partially_running) +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index 23fd316..04bee0f 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -2010,7 +2010,9 @@ function tree_view_onclick(resource_id, auto) { + + tree_view_select(resource_id); + +- load_agent_form(resource_id, resource_obj.get('stonith')); ++ Ember.run.next(Pcs, function() { ++ load_agent_form(resource_id, resource_obj.get('stonith')); ++ }); + } + + function tree_view_select(element_id) { +diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb +index e7e611d..b7260ad 100644 +--- a/pcsd/views/main.erb ++++ b/pcsd/views/main.erb +@@ -277,8 +277,10 @@ + {{#if stonith}} +
+ {{else}} ++ {{#if resource.is_primitive}} +
+ {{/if}} ++ {{/if}} + {{else}} + {{#if stonith}} + NO FENCE DEVICE IN CLUSTER +-- +1.9.1 + diff --git a/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch b/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch new file mode 100644 index 0000000..69de2d1 --- /dev/null +++ b/SOURCES/bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch @@ -0,0 +1,36 @@ +From 906780d7d61fef803c5e1adfa9d156e07e67c26a Mon Sep 17 00:00:00 2001 +From: Ondrej Mular +Date: Tue, 15 Sep 2015 11:14:04 +0200 +Subject: [PATCH] web UI: allows spaces in optional arguments when creating new + resource + +--- + pcsd/public/js/pcsd.js | 7 +++---- + 1 file changed, 3 insertions(+), 4 deletions(-) + +diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js +index cddf14e..84db292 100644 +--- a/pcsd/public/js/pcsd.js ++++ b/pcsd/public/js/pcsd.js +@@ -284,15 +284,14 @@ function disable_spaces(item) { + } + + function load_resource_form(item, ra, stonith) { +- data = { "new": true, resourcename: ra}; ++ var data = { new: true, resourcename: ra}; ++ var command; + if (!stonith) + command = "resource_metadata"; + else + command = "fence_device_metadata"; + +- item.load(get_cluster_remote_url() + command, data, function() { +- disable_spaces(this); +- }); ++ item.load(get_cluster_remote_url() + command, data); + } + + function update_resource_form_groups(form, group_list) { +-- +1.9.1 + diff --git a/SOURCES/bz1205848-Do-not-set-two_node-in-corosync-if-auto_tie_breaker-is-on.patch b/SOURCES/bz1205848-Do-not-set-two_node-in-corosync-if-auto_tie_breaker-is-on.patch deleted file mode 100644 index c5c63d3..0000000 --- a/SOURCES/bz1205848-Do-not-set-two_node-in-corosync-if-auto_tie_breaker-is-on.patch +++ /dev/null @@ -1,122 +0,0 @@ -From e751df5b0e5d6849fa5a8332c7a0fed53c5b5141 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Tue, 3 Mar 2015 15:21:29 +0100 -Subject: [PATCH] Do not set two_node in corosync if auto_tie_breaker is on - ---- - pcs/cluster.py | 23 ++++++-- - pcs/utils.py | 26 +++++++-- - 3 files changed, 170 insertions(+), 14 deletions(-) - -diff --git a/pcs/cluster.py b/pcs/cluster.py -index 8569b92..c4a9b4c 100644 ---- a/pcs/cluster.py -+++ b/pcs/cluster.py -@@ -351,6 +351,20 @@ def corosync_setup(argv,returnConfig=False): - cib_path = os.path.join(settings.cib_dir, "cib.xml") - if os.path.exists(cib_path) and not "--force" in utils.pcs_options: - utils.err("%s already exists, use --force to overwrite" % cib_path) -+ -+ for opt in ["--wait_for_all", "--auto_tie_breaker", "--last_man_standing"]: -+ if ( -+ opt in utils.pcs_options -+ and -+ utils.pcs_options[opt] not in ["0", "1"] -+ ): -+ utils.err( -+ "'%s' is not a valid value for %s, use 0 or 1" -+ % (utils.pcs_options[opt], opt) -+ ) -+ -+ auto_tie_breaker = False -+ - if "--corosync_conf" not in utils.pcs_options: - cluster_destroy([]) - -@@ -372,20 +386,21 @@ def corosync_setup(argv,returnConfig=False): - new_nodes_section += " }\n" - i = i+1 - -- two_node_section = "" -- if len(nodes) == 2: -- two_node_section = "two_node: 1" -- - quorum_options = "" - if "--wait_for_all" in utils.pcs_options: - quorum_options += "wait_for_all: " + utils.pcs_options["--wait_for_all"] + "\n" - if "--auto_tie_breaker" in utils.pcs_options: - quorum_options += "auto_tie_breaker: " + utils.pcs_options["--auto_tie_breaker"] + "\n" -+ if utils.pcs_options["--auto_tie_breaker"] == "1": -+ auto_tie_breaker = True - if "--last_man_standing" in utils.pcs_options: - quorum_options += "last_man_standing: " + 
utils.pcs_options["--last_man_standing"] + "\n" - if "--last_man_standing_window" in utils.pcs_options: - quorum_options += "last_man_standing_window: " + utils.pcs_options["--last_man_standing_window"] + "\n" - -+ two_node_section = "" -+ if len(nodes) == 2 and not auto_tie_breaker: -+ two_node_section = "two_node: 1" - - transport = "udpu" - if "--transport" in utils.pcs_options: -diff --git a/pcs/utils.py b/pcs/utils.py -index d35db1d..6911f0c 100644 ---- a/pcs/utils.py -+++ b/pcs/utils.py -@@ -527,7 +527,8 @@ def removeNodeFromCorosync(node): - node0 = node - node1 = None - -- for c_node in getNodesFromCorosyncConf(): -+ corosync_conf = getCorosyncConf() -+ for c_node in getNodesFromCorosyncConf(corosync_conf): - if c_node == node0: - node_found = True - num_nodes_in_conf = num_nodes_in_conf + 1 -@@ -539,7 +540,7 @@ def removeNodeFromCorosync(node): - in_node = False - node_match = False - node_buffer = [] -- for line in getCorosyncConf().split("\n"): -+ for line in corosync_conf.split("\n"): - if in_node: - node_buffer.append(line) - if ( -@@ -562,7 +563,8 @@ def removeNodeFromCorosync(node): - new_corosync_conf = "\n".join(new_corosync_conf_lines) + "\n" - - if removed_node: -- if num_nodes_in_conf == 3: -+ auto_tie_breaker = getQuorumOption(corosync_conf, "auto_tie_breaker") -+ if num_nodes_in_conf == 3 and auto_tie_breaker != "1": - new_corosync_conf = addQuorumOption(new_corosync_conf,("two_node","1")) - setCorosyncConf(new_corosync_conf) - reloadCorosync() -@@ -640,6 +642,24 @@ def rmQuorumOption(corosync_conf,option): - - return output.rstrip('\n') + "\n" - -+def getQuorumOption(corosync_conf, option): -+ lines = corosync_conf.split("\n") -+ value = None -+ -+ inQuorum = False -+ for line in lines: -+ line = line.strip() -+ if line.startswith("#"): -+ continue -+ if inQuorum and "}" in line: -+ inQuorum = False -+ elif inQuorum and line.split(":", 1)[0].strip() == option: -+ value = line.split(":", 1)[1].strip() -+ elif line.startswith("quorum {"): -+ inQuorum = True -+ -+ return value -+ - def getNextNodeID(corosync_conf): - currentNodes = [] - highest = 0 --- -1.9.1 - diff --git a/SOURCES/bz1218478-fix-cluster-property-name-validation.patch b/SOURCES/bz1218478-fix-cluster-property-name-validation.patch deleted file mode 100644 index 274470b..0000000 --- a/SOURCES/bz1218478-fix-cluster-property-name-validation.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 289af7fd608a67d02494859302e465f19bb269bd Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 7 May 2015 13:15:55 +0200 -Subject: [PATCH] fix cluster property name validation - ---- - pcs/prop.py | 5 ++--- - 2 files changed, 4 insertions(+), 5 deletions(-) - -diff --git a/pcs/prop.py b/pcs/prop.py -index 3b1c15a..6d99032 100644 ---- a/pcs/prop.py -+++ b/pcs/prop.py -@@ -32,9 +32,8 @@ def set_property(argv): - if "--node" in utils.pcs_options: - utils.set_node_attribute(args[0], args[1], utils.pcs_options["--node"]) - elif ("--force" in utils.pcs_options) or utils.is_valid_property(args[0]): -- id_valid, id_error = utils.validate_xml_id(args[0], 'property name') -- if not id_valid: -- utils.err(id_error) -+ if not args[0]: -+ utils.err("property name cannot be empty") - utils.set_cib_property(args[0],args[1]) - else: - utils.err("unknown cluster property: '%s', (use --force to override)" % args[0]) --- -1.9.1 - diff --git a/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch b/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch new file mode 100644 index 0000000..1850941 --- 
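To make the intent of the bz1205848 patch above explicit: corosync's two_node mode and auto_tie_breaker do not mix, so two_node: 1 is only written for a two-node cluster that does not enable auto_tie_breaker, and the 0/1 quorum flags are validated up front. A condensed Python illustration of that decision (simplified; the real code also rewrites two_node when a node is removed from corosync.conf):

```python
# Simplified illustration of the quorum-option rule enforced by the
# bz1205848 patch; this is not the patch code itself.
def quorum_options(node_count, wait_for_all=None, auto_tie_breaker=None,
                   last_man_standing=None):
    for name, value in (("wait_for_all", wait_for_all),
                        ("auto_tie_breaker", auto_tie_breaker),
                        ("last_man_standing", last_man_standing)):
        if value is not None and value not in ("0", "1"):
            raise ValueError(
                "'%s' is not a valid value for %s, use 0 or 1" % (value, name)
            )
    options = {}
    if wait_for_all is not None:
        options["wait_for_all"] = wait_for_all
    if auto_tie_breaker is not None:
        options["auto_tie_breaker"] = auto_tie_breaker
    if last_man_standing is not None:
        options["last_man_standing"] = last_man_standing
    # two_node and auto_tie_breaker are mutually exclusive.
    if node_count == 2 and auto_tie_breaker != "1":
        options["two_node"] = "1"
    return options


print(quorum_options(2))                        # {'two_node': '1'}
print(quorum_options(2, auto_tie_breaker="1"))  # {'auto_tie_breaker': '1'}
```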
/dev/null +++ b/SOURCES/bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch @@ -0,0 +1,189 @@ +From 082be752ee38c8d1314c2130a029e60648f7896b Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 11 Aug 2015 16:34:02 +0200 +Subject: [PATCH] add nagios support to 'pcs resource list' and web UI + +--- + pcs/resource.py | 58 ++++++++++++++++++++++++++++++++++++++++++-------------- + pcsd/remote.rb | 4 ++++ + pcsd/resource.rb | 23 ++++++++++++++++++---- + pcsd/settings.rb | 1 + + 4 files changed, 68 insertions(+), 18 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index f7d8821..8e05aeb 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -198,13 +198,28 @@ def parse_resource_options(argv, with_clone=False): + # List available resources + # TODO make location more easily configurable + def resource_list_available(argv): ++ def get_name_and_desc(full_res_name, metadata): ++ sd = "" ++ try: ++ dom = parseString(metadata) ++ shortdesc = dom.documentElement.getElementsByTagName("shortdesc") ++ if len(shortdesc) > 0: ++ sd = " - " + format_desc( ++ len(full_res_name + " - "), ++ shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ") ++ ) ++ except xml.parsers.expat.ExpatError: ++ sd = "" ++ finally: ++ return full_res_name + sd + "\n" ++ + ret = "" + if len(argv) != 0: + filter_string = argv[0] + else: + filter_string = "" + +-# ocf agents ++ # ocf agents + os.environ['OCF_ROOT'] = "/usr/lib/ocf/" + providers = sorted(os.listdir("/usr/lib/ocf/resource.d")) + for provider in providers: +@@ -223,32 +238,47 @@ def resource_list_available(argv): + metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource) + if metadata == False: + continue +- sd = "" +- try: +- dom = parseString(metadata) +- shortdesc = dom.documentElement.getElementsByTagName("shortdesc") +- if len(shortdesc) > 0: +- sd = " - " + format_desc(full_res_name.__len__() + 3, shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ")) +- except xml.parsers.expat.ExpatError: +- sd = "" +- finally: +- ret += full_res_name + sd + "\n" +-# lsb agents ++ ret += get_name_and_desc( ++ "ocf:" + provider + ":" + resource, ++ metadata ++ ) ++ ++ # lsb agents + lsb_dir = "/etc/init.d/" + agents = sorted(os.listdir(lsb_dir)) + for agent in agents: + if os.access(lsb_dir + agent, os.X_OK): + ret += "lsb:" + agent + "\n" +-# systemd agents ++ ++ # systemd agents + if utils.is_systemctl(): + agents, retval = utils.run(["systemctl", "list-unit-files", "--full"]) + agents = agents.split("\n") +- + for agent in agents: + match = re.search(r'^([\S]*)\.service',agent) + if match: + ret += "systemd:" + match.group(1) + "\n" + ++ # nagios metadata ++ nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata" ++ for metadata_file in sorted(os.listdir(nagios_metadata_path)): ++ if metadata_file.startswith("."): ++ continue ++ full_res_name = "nagios:" + metadata_file ++ if full_res_name.lower().endswith(".xml"): ++ full_res_name = full_res_name[:-len(".xml")] ++ if "--nodesc" in utils.pcs_options: ++ ret += full_res_name + "\n" ++ continue ++ try: ++ ret += get_name_and_desc( ++ full_res_name, ++ open(os.path.join(nagios_metadata_path, metadata_file), "r").read() ++ ) ++ except EnvironmentError as e: ++ pass ++ ++ # output + if not ret: + utils.err( + "No resource agents available. 
" +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index 5b7c753..cb5b176 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1373,6 +1373,8 @@ def resource_form(params, request, session) + @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + @cur_resource.type) + elsif @cur_resource.provider == 'pacemaker' + @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + @cur_resource.type) ++ elsif @cur_resource._class == 'nagios' ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') + end + @existing_resource = true + if @resource +@@ -1546,6 +1548,8 @@ def resource_metadata(params, request, session) + @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + resource_name) + elsif class_provider == "ocf:pacemaker" + @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + resource_name) ++ elsif class_provider == 'nagios' ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + resource_name + '.xml') + end + @new_resource = params[:new] + @resources, @groups = getResourcesGroups(session) +diff --git a/pcsd/resource.rb b/pcsd/resource.rb +index f375bae..c6b513b 100644 +--- a/pcsd/resource.rb ++++ b/pcsd/resource.rb +@@ -303,13 +303,28 @@ def getColocationConstraints(session, resource_id) + end + + def getResourceMetadata(resourcepath) +- ENV['OCF_ROOT'] = OCF_ROOT +- metadata = `#{resourcepath} meta-data` +- doc = REXML::Document.new(metadata) + options_required = {} + options_optional = {} + long_desc = "" + short_desc = "" ++ ++ if resourcepath.end_with?('.xml') ++ begin ++ metadata = IO.read(resourcepath) ++ rescue ++ metadata = "" ++ end ++ else ++ ENV['OCF_ROOT'] = OCF_ROOT ++ metadata = `#{resourcepath} meta-data` ++ end ++ ++ begin ++ doc = REXML::Document.new(metadata) ++ rescue REXML::ParseException ++ return [options_required, options_optional, [short_desc, long_desc]] ++ end ++ + doc.elements.each('resource-agent/longdesc') {|ld| + long_desc = ld.text ? 
ld.text.strip : ld.text + } +@@ -345,7 +360,7 @@ def getResourceMetadata(resourcepath) + options_optional[param.attributes["name"]] = temp_array + end + } +- [options_required, options_optional, [short_desc,long_desc]] ++ [options_required, options_optional, [short_desc, long_desc]] + end + + def getResourceAgents(session, resource_agent=nil) +diff --git a/pcsd/settings.rb b/pcsd/settings.rb +index 0cd3109..4cea800 100644 +--- a/pcsd/settings.rb ++++ b/pcsd/settings.rb +@@ -8,6 +8,7 @@ COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret' + OCF_ROOT = "/usr/lib/ocf" + HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" + PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" ++NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/' + PENGINE = "/usr/libexec/pacemaker/pengine" + CRM_MON = "/usr/sbin/crm_mon" + CRM_NODE = "/usr/sbin/crm_node" +-- +1.9.1 + diff --git a/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch b/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch new file mode 100644 index 0000000..9fc4091 --- /dev/null +++ b/SOURCES/bz1235022-02-fix-crash-when-missing-nagios-metadata.patch @@ -0,0 +1,59 @@ +From 2c269bd74344dab5b55f398c90ab0077b3d31e21 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Fri, 4 Sep 2015 12:59:41 +0200 +Subject: [PATCH] fix crash when missing nagios-metadata + +--- + pcs/resource.py | 36 ++++++++++++++++++++---------------- + 1 file changed, 20 insertions(+), 16 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index 8e05aeb..2dcddc3 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -261,22 +261,26 @@ def resource_list_available(argv): + + # nagios metadata + nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata" +- for metadata_file in sorted(os.listdir(nagios_metadata_path)): +- if metadata_file.startswith("."): +- continue +- full_res_name = "nagios:" + metadata_file +- if full_res_name.lower().endswith(".xml"): +- full_res_name = full_res_name[:-len(".xml")] +- if "--nodesc" in utils.pcs_options: +- ret += full_res_name + "\n" +- continue +- try: +- ret += get_name_and_desc( +- full_res_name, +- open(os.path.join(nagios_metadata_path, metadata_file), "r").read() +- ) +- except EnvironmentError as e: +- pass ++ if os.path.isdir(nagios_metadata_path): ++ for metadata_file in sorted(os.listdir(nagios_metadata_path)): ++ if metadata_file.startswith("."): ++ continue ++ full_res_name = "nagios:" + metadata_file ++ if full_res_name.lower().endswith(".xml"): ++ full_res_name = full_res_name[:-len(".xml")] ++ if "--nodesc" in utils.pcs_options: ++ ret += full_res_name + "\n" ++ continue ++ try: ++ ret += get_name_and_desc( ++ full_res_name, ++ open( ++ os.path.join(nagios_metadata_path, metadata_file), ++ "r" ++ ).read() ++ ) ++ except EnvironmentError as e: ++ pass + + # output + if not ret: +-- +1.9.1 + diff --git a/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch b/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch new file mode 100644 index 0000000..509061d --- /dev/null +++ b/SOURCES/bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch @@ -0,0 +1,44 @@ +From fc89908d91a2438f59dd08cf79aedfb85512091b Mon Sep 17 00:00:00 2001 +From: Chris Feist +Date: Fri, 18 Sep 2015 16:29:58 -0500 +Subject: [PATCH] Added more detailed warnings for 'pcs stonith confirm' + +--- + pcs/pcs.8 | 4 +++- + pcs/usage.py | 5 ++++- + 2 files changed, 7 insertions(+), 2 deletions(-) + +diff --git a/pcs/pcs.8 
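The pcs/resource.py hunks above (together with the follow-up crash fix) boil down to: scan /usr/share/pacemaker/nagios/plugins-metadata, derive an agent name from each XML file, and print its shortdesc, while tolerating a missing directory, unreadable files and malformed XML. A condensed, standalone sketch of that listing logic (simplified error handling; not the exact patch code):

```python
# Condensed sketch of the nagios agent listing added in pcs/resource.py.
import os
import xml.parsers.expat
from xml.dom.minidom import parseString

NAGIOS_METADATA_PATH = "/usr/share/pacemaker/nagios/plugins-metadata"


def list_nagios_agents(path=NAGIOS_METADATA_PATH):
    lines = []
    if not os.path.isdir(path):
        return lines                      # no nagios metadata installed
    for metadata_file in sorted(os.listdir(path)):
        if metadata_file.startswith("."):
            continue
        name = "nagios:" + metadata_file
        if name.lower().endswith(".xml"):
            name = name[:-len(".xml")]
        desc = ""
        try:
            with open(os.path.join(path, metadata_file)) as xml_file:
                dom = parseString(xml_file.read())
            shortdesc = dom.documentElement.getElementsByTagName("shortdesc")
            if shortdesc and shortdesc[0].firstChild:
                desc = " - " + shortdesc[0].firstChild.nodeValue.strip()
        except (EnvironmentError, xml.parsers.expat.ExpatError):
            pass                          # keep the name, drop the description
        lines.append(name + desc)
    return lines


if __name__ == "__main__":
    print("\n".join(list_nagios_agents()))
```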
b/pcs/pcs.8 +index 70f0f6c..e89c813 100644 +--- a/pcs/pcs.8 ++++ b/pcs/pcs.8 +@@ -333,7 +333,9 @@ fence [\fB\-\-off\fR] + Fence the node specified (if \fB\-\-off\fR is specified, use the 'off' API call to stonith which will turn the node off instead of rebooting it) + .TP + confirm +-Confirm that the host specified is currently down. WARNING: if this node is not actually down data corruption/cluster failure can occur. ++Confirm that the host specified is currently down. This command should \fBONLY\fR be used when the node specified has already been confirmed to be down. ++ ++.B WARNING: if this node is not actually down data corruption/cluster failure can occur. + .SS "acl" + .TP + [show] +diff --git a/pcs/usage.py b/pcs/usage.py +index c430965..63baa76 100644 +--- a/pcs/usage.py ++++ b/pcs/usage.py +@@ -833,7 +833,10 @@ Commands: + call to stonith which will turn the node off instead of rebooting it) + + confirm +- Confirm that the host specified is currently down. ++ Confirm that the host specified is currently down. This command ++ should ONLY be used when the node specified has already been ++ confirmed to be down. ++ + WARNING: if this node is not actually down data corruption/cluster + failure can occur. + +-- +1.9.1 + diff --git a/SOURCES/bz1253289-fixed-session-and-cookies-processing.patch b/SOURCES/bz1253289-fixed-session-and-cookies-processing.patch deleted file mode 100644 index 1d11c0d..0000000 --- a/SOURCES/bz1253289-fixed-session-and-cookies-processing.patch +++ /dev/null @@ -1,185 +0,0 @@ -From e338fc2e3eab278aa012f058472ba34b98ae80a3 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Fri, 31 Jul 2015 17:34:46 +0200 -Subject: [PATCH] fixed session and cookies processing - ---- - pcsd/auth.rb | 24 +++++++++++------------- - pcsd/pcs.rb | 8 ++++---- - pcsd/pcsd.rb | 14 ++++++++++---- - pcsd/remote.rb | 2 +- - 4 files changed, 26 insertions(+), 22 deletions(-) - -diff --git a/pcsd/auth.rb b/pcsd/auth.rb -index 8953d60..05bfadf 100644 ---- a/pcsd/auth.rb -+++ b/pcsd/auth.rb -@@ -3,9 +3,8 @@ require 'pp' - require 'securerandom' - require 'rpam' - --class PCSAuth - # Ruby 1.8.7 doesn't implement SecureRandom.uuid -- def self.uuid -+ def pcsauth_uuid - if defined? 
SecureRandom.uuid - return SecureRandom.uuid - else -@@ -16,7 +15,7 @@ class PCSAuth - end - end - -- def self.validUser(username, password, generate_token = false, request = nil) -+ def pcsauth_validUser(username, password, generate_token = false, request = nil) - $logger.info("Attempting login by '#{username}'") - if not Rpam.auth(username,password, :service => "pcsd") - $logger.info("Failed login by '#{username}' (bad username or password)") -@@ -37,7 +36,7 @@ class PCSAuth - $logger.info("Successful login by '#{username}'") - - if generate_token -- token = PCSAuth.uuid -+ token = pcsauth_uuid - begin - password_file = File.open($user_pass_file, File::RDWR|File::CREAT) - password_file.flock(File::LOCK_EX) -@@ -57,7 +56,7 @@ class PCSAuth - return true - end - -- def self.validToken(token) -+ def pcsauth_validToken(token) - begin - json = File.read($user_pass_file) - users = JSON.parse(json) -@@ -73,10 +72,10 @@ class PCSAuth - return false - end - -- def self.isLoggedIn(session, cookies) -- if username = validToken(cookies["token"]) -- if username == "hacluster" and $cookies.key?(:CIB_user) and $cookies.key?(:CIB_user) != "" -- $session[:username] = $cookies[:CIB_user] -+ def pcsauth_isLoggedIn(session, cookies) -+ if username = pcsauth_validToken(cookies["token"]) -+ if username == "hacluster" and cookies.key?('CIB_user') and cookies['CIB_user'] != "" -+ session[:username] = cookies['CIB_user'] - end - return true - else -@@ -85,11 +84,11 @@ class PCSAuth - end - - # Always an admin until we implement groups -- def self.isAdmin(session) -+ def pcsauth_isAdmin(session) - true - end - -- def self.createUser(username, password) -+ def pcsauth_createUser(username, password) - begin - json = File.read($user_pass_file) - users = JSON.parse(json) -@@ -97,7 +96,7 @@ class PCSAuth - users = [] - end - -- token = PCSAuth.uuid -+ token = pcsauth_uuid - - users.delete_if{|u| u["username"] == username} - users << {"username" => username, "password" => password, "token" => token} -@@ -105,5 +104,4 @@ class PCSAuth - f.write(JSON.pretty_generate(users)) - end - end --end - -diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb -index 3fad833..d8e27b3 100644 ---- a/pcsd/pcs.rb -+++ b/pcsd/pcs.rb -@@ -306,7 +306,7 @@ def send_request_with_token(node, request, post=false, data={}, remote=true, raw - req.set_form_data(data) - end - cookies_to_send = [CGI::Cookie.new("name" => 'token', "value" => token).to_s] -- cookies_to_send << CGI::Cookie.new("name" => "CIB_user", "value" => $session[:username].to_s).to_s -+ cookies_to_send << CGI::Cookie.new("name" => "CIB_user", "value" => get_session()[:username].to_s).to_s - req.add_field("Cookie",cookies_to_send.join(";")) - myhttp = Net::HTTP.new(uri.host, uri.port) - myhttp.use_ssl = true -@@ -623,10 +623,10 @@ def run_cmd(*args) - start = Time.now - out = "" - errout = "" -- if $session[:username] == "hacluster" -- ENV['CIB_user'] = $cookies[:CIB_user] -+ if get_session()[:username] == "hacluster" -+ ENV['CIB_user'] = get_cookies()[:CIB_user] - else -- ENV['CIB_user'] = $session[:username] -+ ENV['CIB_user'] = get_session()[:username] - end - $logger.debug("CIB USER: #{ENV['CIB_user'].to_s}") - status = Open4::popen4(*args) do |pid, stdin, stdout, stderr| -diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb -index 5a0928a..2633360 100644 ---- a/pcsd/pcsd.rb -+++ b/pcsd/pcsd.rb -@@ -49,8 +49,6 @@ also_reload 'auth.rb' - also_reload 'wizard.rb' - - before do -- $session = session -- $cookies = cookies - if request.path != '/login' and not request.path == "/logout" and not request.path == 
'/remote/auth' - protected! - end -@@ -117,7 +115,7 @@ set :run, false - - helpers do - def protected! -- if not PCSAuth.isLoggedIn(session, request.cookies) -+ if not pcsauth_isLoggedIn(session, request.cookies) - # If we're on /managec//main we redirect - match_expr = "/managec/(.*)/(.*)" - mymatch = request.path.match(match_expr) -@@ -198,7 +196,7 @@ if not DISABLE_GUI - end - - post '/login' do -- if PCSAuth.validUser(params['username'],params['password']) -+ if pcsauth_validUser(params['username'],params['password']) - session["username"] = params['username'] - # Temporarily ignore pre_login_path until we come up with a list of valid - # paths to redirect to (to prevent status_all issues) -@@ -741,4 +739,12 @@ helpers do - def h(text) - Rack::Utils.escape_html(text) - end -+ -+ def get_session -+ session -+ end -+ -+ def get_cookies -+ return cookies -+ end - end -diff --git a/pcsd/remote.rb b/pcsd/remote.rb -index b98e9a9..69142e4 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -628,7 +628,7 @@ def status_all(params, nodes = []) - end - - def auth(params,request) -- token = PCSAuth.validUser(params['username'],params['password'], true, request) -+ token = pcsauth_validUser(params['username'],params['password'], true, request) - # If we authorized to this machine, attempt to authorize everywhere - node_list = [] - if token and params["bidirectional"] --- -1.9.1 - diff --git a/SOURCES/bz1253293-fixed-command-injection-vulnerability.patch b/SOURCES/bz1253293-fixed-command-injection-vulnerability.patch deleted file mode 100644 index 4fc4142..0000000 --- a/SOURCES/bz1253293-fixed-command-injection-vulnerability.patch +++ /dev/null @@ -1,94 +0,0 @@ -From 6386c5826e7c57a2ab54933bfb9914346763ea27 Mon Sep 17 00:00:00 2001 -From: Tomas Jelinek -Date: Thu, 13 Aug 2015 15:17:24 +0200 -Subject: [PATCH] fixed command injection vulnerability - ---- - pcsd/fenceagent.rb | 19 +++++++++++++------ - pcsd/remote.rb | 1 + - pcsd/resource.rb | 11 ++++++----- - 3 files changed, 20 insertions(+), 11 deletions(-) - -diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb -index 8b37147..22efbf0 100644 ---- a/pcsd/fenceagent.rb -+++ b/pcsd/fenceagent.rb -@@ -18,12 +18,6 @@ def getFenceAgents(fence_agent = nil) - end - - def getFenceAgentMetadata(fenceagentname) -- # There are bugs in stonith_admin & the new fence_agents interaction -- # eventually we'll want to switch back to this, but for now we directly -- # call the agent to get metadata -- #metadata = `stonith_admin --metadata -a #{fenceagentname}` -- metadata = `/usr/sbin/#{fenceagentname} -o metadata` -- doc = REXML::Document.new(metadata) - options_required = {} - options_optional = {} - options_advanced = { -@@ -39,6 +33,19 @@ def getFenceAgentMetadata(fenceagentname) - options_advanced["pcmk_" + a + "_timeout"] = "" - options_advanced["pcmk_" + a + "_retries"] = "" - end -+ -+ # There are bugs in stonith_admin & the new fence_agents interaction -+ # eventually we'll want to switch back to this, but for now we directly -+ # call the agent to get metadata -+ #metadata = `stonith_admin --metadata -a #{fenceagentname}` -+ if not fenceagentname.start_with?('fence_') or fenceagentname.include?('/') -+ return [options_required, options_optional, options_advanced] -+ end -+ stdout, stderr, retval = run_cmd( -+ "/usr/sbin/#{fenceagentname}", '-o', 'metadata' -+ ) -+ doc = REXML::Document.new(stdout.join) -+ - doc.elements.each('resource-agent/parameters/parameter') { |param| - temp_array = [] - if param.elements["shortdesc"] -diff --git a/pcsd/remote.rb 
b/pcsd/remote.rb -index 69142e4..f8fde98 100644 ---- a/pcsd/remote.rb -+++ b/pcsd/remote.rb -@@ -874,6 +874,7 @@ def resource_metadata (params) - return 200 if not params[:resourcename] or params[:resourcename] == "" - resource_name = params[:resourcename][params[:resourcename].rindex(':')+1..-1] - class_provider = params[:resourcename][0,params[:resourcename].rindex(':')] -+ return [400, 'Invalid resource agent name'] if resource_name.include?('/') - - @resource = ResourceAgent.new(params[:resourcename]) - if class_provider == "ocf:heartbeat" -diff --git a/pcsd/resource.rb b/pcsd/resource.rb -index 1577e58..01b64fa 100644 ---- a/pcsd/resource.rb -+++ b/pcsd/resource.rb -@@ -103,11 +103,12 @@ def getResourceOptions(resource_id,stonith=false) - - ret = {} - if stonith -- resource_options = `#{PCS} stonith show #{resource_id}` -+ command = [PCS, 'stonith', 'show', resource_id] - else -- resource_options = `#{PCS} resource show #{resource_id}` -+ command = [PCS, 'resource', 'show', resource_id] - end -- resource_options.each_line { |line| -+ stdout, stderr, retval = run_cmd(*command) -+ stdout.each { |line| - keyval = line.strip.split(/: /,2) - if keyval[0] == "Attributes" then - options = keyval[1].split(/ /) -@@ -281,8 +282,8 @@ end - - def getResourceMetadata(resourcepath) - ENV['OCF_ROOT'] = OCF_ROOT -- metadata = `#{resourcepath} meta-data` -- doc = REXML::Document.new(metadata) -+ stdout, stderr, retval = run_cmd(resourcepath, 'meta-data') -+ doc = REXML::Document.new(stdout.join) - options_required = {} - options_optional = {} - long_desc = "" --- -1.9.1 - diff --git a/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch b/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch new file mode 100644 index 0000000..1b6aa4f --- /dev/null +++ b/SOURCES/bz1253294-01-fixed-command-injection-vulnerability.patch @@ -0,0 +1,259 @@ +From b47f6196aaf405f17197d4bb312d94ec84042343 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 25 Aug 2015 16:46:46 +0200 +Subject: [PATCH] fixed command injection vulnerability + +--- + pcsd/fenceagent.rb | 53 ++++++++++++++++++++++++++++++++++------------------- + pcsd/pcsd.rb | 6 +++--- + pcsd/remote.rb | 18 +++++++++--------- + pcsd/resource.rb | 27 +++++++++++++++++++++++---- + 4 files changed, 69 insertions(+), 35 deletions(-) + +diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb +index b7674fd..b52ad6f 100644 +--- a/pcsd/fenceagent.rb ++++ b/pcsd/fenceagent.rb +@@ -1,4 +1,4 @@ +-def getFenceAgents(fence_agent = nil) ++def getFenceAgents(session, fence_agent = nil) + fence_agent_list = {} + agents = Dir.glob('/usr/sbin/fence_' + '*') + agents.each { |a| +@@ -7,7 +7,7 @@ def getFenceAgents(fence_agent = nil) + next if fa.name == "fence_ack_manual" + + if fence_agent and a.sub(/.*\//,"") == fence_agent.sub(/.*:/,"") +- required_options, optional_options, advanced_options, info = getFenceAgentMetadata(fa.name) ++ required_options, optional_options, advanced_options, info = getFenceAgentMetadata(session, fa.name) + fa.required_options = required_options + fa.optional_options = optional_options + fa.advanced_options = advanced_options +@@ -18,13 +18,42 @@ def getFenceAgents(fence_agent = nil) + fence_agent_list + end + +-def getFenceAgentMetadata(fenceagentname) ++def getFenceAgentMetadata(session, fenceagentname) ++ options_required = {} ++ options_optional = {} ++ options_advanced = { ++ "priority" => "", ++ "pcmk_host_argument" => "", ++ "pcmk_host_map" => "", ++ "pcmk_host_list" => "", ++ "pcmk_host_check" => "" ++ } ++ for a in 
["reboot", "list", "status", "monitor", "off"] ++ options_advanced["pcmk_" + a + "_action"] = "" ++ options_advanced["pcmk_" + a + "_timeout"] = "" ++ options_advanced["pcmk_" + a + "_retries"] = "" ++ end ++ + # There are bugs in stonith_admin & the new fence_agents interaction + # eventually we'll want to switch back to this, but for now we directly + # call the agent to get metadata + #metadata = `stonith_admin --metadata -a #{fenceagentname}` +- metadata = `/usr/sbin/#{fenceagentname} -o metadata` +- doc = REXML::Document.new(metadata) ++ if not fenceagentname.start_with?('fence_') or fenceagentname.include?('/') ++ $logger.error "Invalid fence agent '#{fenceagentname}'" ++ return [options_required, options_optional, options_advanced] ++ end ++ stdout, stderr, retval = run_cmd( ++ session, "/usr/sbin/#{fenceagentname}", '-o', 'metadata' ++ ) ++ metadata = stdout.join ++ begin ++ doc = REXML::Document.new(metadata) ++ rescue REXML::ParseException => e ++ $logger.error( ++ "Unable to parse metadata of fence agent '#{resourcepath}': #{e}" ++ ) ++ return [options_required, options_optional, options_advanced] ++ end + + short_desc = "" + long_desc = "" +@@ -40,20 +69,6 @@ def getFenceAgentMetadata(fenceagentname) + long_desc = ld.text ? ld.text.strip : ld.text + } + +- options_required = {} +- options_optional = {} +- options_advanced = { +- "priority" => "", +- "pcmk_host_argument" => "", +- "pcmk_host_map" => "", +- "pcmk_host_list" => "", +- "pcmk_host_check" => "" +- } +- for a in ["reboot", "list", "status", "monitor", "off"] +- options_advanced["pcmk_" + a + "_action"] = "" +- options_advanced["pcmk_" + a + "_timeout"] = "" +- options_advanced["pcmk_" + a + "_retries"] = "" +- end + doc.elements.each('resource-agent/parameters/parameter') { |param| + temp_array = [] + if param.elements["shortdesc"] +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index e4b4c25..1f26fe5 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -401,7 +401,7 @@ if not DISABLE_GUI + + if @resources.length == 0 + @cur_resource = nil +- @resource_agents = getFenceAgents() ++ @resource_agents = getFenceAgents(session) + else + @cur_resource = @resources[0] + if params[:fencedevice] +@@ -413,7 +413,7 @@ if not DISABLE_GUI + end + end + @cur_resource.options = getResourceOptions(session, @cur_resource.id) +- @resource_agents = getFenceAgents(@cur_resource.agentname) ++ @resource_agents = getFenceAgents(session, @cur_resource.agentname) + end + erb :fencedevices, :layout => :main + end +@@ -477,7 +477,7 @@ if not DISABLE_GUI + # } + # } + @resource_agents = getResourceAgents(session) +- @stonith_agents = getFenceAgents() ++ @stonith_agents = getFenceAgents(session) + # @nodes = @nodes.sort_by{|k,v|k} + erb :nodes, :layout => :main + end +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index cb5b176..4655756 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1370,11 +1370,11 @@ def resource_form(params, request, session) + @cur_resource_ms = @cur_resource.get_master + @resource = ResourceAgent.new(@cur_resource.agentname) + if @cur_resource.provider == 'heartbeat' +- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + @cur_resource.type) ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + @cur_resource.type) + elsif @cur_resource.provider == 'pacemaker' +- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + 
@cur_resource.type) ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + @cur_resource.type) + elsif @cur_resource._class == 'nagios' +- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + @cur_resource.type + '.xml') + end + @existing_resource = true + if @resource +@@ -1395,7 +1395,7 @@ def fence_device_form(params, request, session) + @cur_resource = get_resource_by_id(params[:resource], get_cib_dom(session)) + + if @cur_resource.instance_of?(ClusterEntity::Primitive) and @cur_resource.stonith +- @resource_agents = getFenceAgents(@cur_resource.agentname) ++ @resource_agents = getFenceAgents(session, @cur_resource.agentname) + @existing_resource = true + @fenceagent = @resource_agents[@cur_resource.type] + erb :fenceagentform +@@ -1531,7 +1531,7 @@ def get_avail_fence_agents(params, request, session) + if not allowed_for_local_cluster(session, Permissions::READ) + return 403, 'Permission denied' + end +- agents = getFenceAgents() ++ agents = getFenceAgents(session) + return JSON.generate(agents) + end + +@@ -1545,11 +1545,11 @@ def resource_metadata(params, request, session) + + @resource = ResourceAgent.new(params[:resourcename]) + if class_provider == "ocf:heartbeat" +- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + resource_name) ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + resource_name) + elsif class_provider == "ocf:pacemaker" +- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + resource_name) ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + resource_name) + elsif class_provider == 'nagios' +- @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(NAGIOS_METADATA_DIR + resource_name + '.xml') ++ @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + resource_name + '.xml') + end + @new_resource = params[:new] + @resources, @groups = getResourcesGroups(session) +@@ -1563,7 +1563,7 @@ def fence_device_metadata(params, request, session) + end + return 200 if not params[:resourcename] or params[:resourcename] == "" + @fenceagent = FenceAgent.new(params[:resourcename]) +- @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(params[:resourcename]) ++ @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(session, params[:resourcename]) + @new_fenceagent = params[:new] + + erb :fenceagentform +diff --git a/pcsd/resource.rb b/pcsd/resource.rb +index c6b513b..6f8f7fe 100644 +--- a/pcsd/resource.rb ++++ b/pcsd/resource.rb +@@ -1,4 +1,5 @@ + require 'pp' ++require 'pathname' + + def getResourcesGroups(session, get_fence_devices = false, get_all_options = false, + get_operations=false +@@ -302,12 +303,24 @@ def getColocationConstraints(session, resource_id) + return together,apart + end + +-def getResourceMetadata(resourcepath) ++def 
getResourceMetadata(session, resourcepath) + options_required = {} + options_optional = {} + long_desc = "" + short_desc = "" + ++ resourcepath = Pathname.new(resourcepath).cleanpath.to_s ++ resource_dirs = [ ++ HEARTBEAT_AGENTS_DIR, PACEMAKER_AGENTS_DIR, NAGIOS_METADATA_DIR, ++ ] ++ if not resource_dirs.any? { |allowed| resourcepath.start_with?(allowed) } ++ $logger.error( ++ "Unable to get metadata of resource agent '#{resourcepath}': " + ++ 'path not allowed' ++ ) ++ return [options_required, options_optional, [short_desc, long_desc]] ++ end ++ + if resourcepath.end_with?('.xml') + begin + metadata = IO.read(resourcepath) +@@ -316,12 +329,16 @@ def getResourceMetadata(resourcepath) + end + else + ENV['OCF_ROOT'] = OCF_ROOT +- metadata = `#{resourcepath} meta-data` ++ stdout, stderr, retval = run_cmd(session, resourcepath, 'meta-data') ++ metadata = stdout.join + end + + begin + doc = REXML::Document.new(metadata) +- rescue REXML::ParseException ++ rescue REXML::ParseException => e ++ $logger.error( ++ "Unable to parse metadata of resource agent '#{resourcepath}': #{e}" ++ ) + return [options_required, options_optional, [short_desc, long_desc]] + end + +@@ -381,7 +398,9 @@ def getResourceAgents(session, resource_agent=nil) + if resource_agent and (a.start_with?("ocf:heartbeat:") or a.start_with?("ocf:pacemaker:")) + split_agent = ra.name.split(/:/) + path = OCF_ROOT + '/resource.d/' + split_agent[1] + "/" + split_agent[2] +- required_options, optional_options, resource_info = getResourceMetadata(path) ++ required_options, optional_options, resource_info = getResourceMetadata( ++ session, path ++ ) + ra.required_options = required_options + ra.optional_options = optional_options + ra.info = resource_info +-- +1.9.1 + diff --git a/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch b/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch new file mode 100644 index 0000000..3483ad3 --- /dev/null +++ b/SOURCES/bz1253491-01-fix-pcs-pcsd-path-detection.patch @@ -0,0 +1,46 @@ +From 7323d4fb2454d65bb26839fd6fb4809d19258d34 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Tue, 25 Aug 2015 14:51:19 +0200 +Subject: [PATCH] fix pcs/pcsd path detection + +--- + pcs/utils.py | 2 +- + pcsd/bootstrap.rb | 4 +++- + 2 files changed, 4 insertions(+), 2 deletions(-) + +diff --git a/pcs/utils.py b/pcs/utils.py +index cd33a27..761723b 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -736,7 +736,7 @@ def run_pcsdcli(command, data=None): + env_var = dict() + if "--debug" in pcs_options: + env_var["PCSD_DEBUG"] = "true" +- pcs_dir = os.path.dirname(sys.argv[0]) ++ pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0])) + if pcs_dir == "/usr/sbin": + pcsd_dir_path = settings.pcsd_exec_location + else: +diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb +index 07a7d27..64c3b98 100644 +--- a/pcsd/bootstrap.rb ++++ b/pcsd/bootstrap.rb +@@ -1,4 +1,5 @@ + require 'logger' ++require 'pathname' + + require 'settings.rb' + +@@ -32,7 +33,8 @@ def is_systemctl() + end + + def get_pcs_path(pcsd_path) +- if PCSD_EXEC_LOCATION == pcsd_path or PCSD_EXEC_LOCATION == (pcsd_path + '/') ++ real_path = Pathname.new(pcsd_path).realpath.to_s ++ if PCSD_EXEC_LOCATION == real_path or PCSD_EXEC_LOCATION == (real_path + '/') + return '/usr/sbin/pcs' + else + return '../pcs/pcs' +-- +1.9.1 + diff --git a/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch b/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch new file mode 100644 index 0000000..a3c5cec --- /dev/null +++ 
b/SOURCES/bz1257369-01-always-print-output-of-crm_resource-cleanup.patch @@ -0,0 +1,33 @@ +From 122c7b6b5d31fdc0cf997aeb01252fb4c8801da5 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Fri, 4 Sep 2015 17:12:27 +0200 +Subject: [PATCH] always print output of crm_resource --cleanup + +--- + pcs/resource.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index 2dcddc3..be1f1ba 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -2559,14 +2559,14 @@ def resource_cleanup(res_id): + if retval != 0: + utils.err("Unable to cleanup resource: %s" % res_id + "\n" + output) + else: +- print "Resource: %s successfully cleaned up" % res_id ++ print output + + def resource_cleanup_all(): + (output, retval) = utils.run(["crm_resource", "-C"]) + if retval != 0: + utils.err("Unexpected error occured. 'crm_resource -C' err_code: %s\n%s" % (retval, output)) + else: +- print "All resources/stonith devices successfully cleaned up" ++ print output + + def resource_history(args): + dom = utils.get_cib_dom() +-- +1.9.1 + diff --git a/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch b/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch new file mode 100644 index 0000000..ae03878 --- /dev/null +++ b/SOURCES/bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch @@ -0,0 +1,37 @@ +commit 4d4ad9fc870998f4e70256ef62371f38da3a4855 +Author: Chris Feist +AuthorDate: Mon Aug 31 15:13:46 2015 -0500 +Commit: Chris Feist +CommitDate: Mon Aug 31 15:13:46 2015 -0500 + + Fix tracebacks during pcsd shutdowns + +diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb +index e948aef..97d131e 100644 +--- a/pcsd/ssl.rb ++++ b/pcsd/ssl.rb +@@ -67,14 +67,20 @@ end + server = ::Rack::Handler::WEBrick + trap(:INT) do + puts "Shutting down (INT)" +- server.shutdown +- #exit ++ if server.instance_variable_get("@server") ++ server.shutdown ++ else ++ exit ++ end + end + + trap(:TERM) do + puts "Shutting down (TERM)" +- server.shutdown +- #exit ++ if server.instance_variable_get("@server") ++ server.shutdown ++ else ++ exit ++ end + end + + require 'pcsd' diff --git a/SOURCES/bz1265425-01-Fix-for-crm_node-l-output-change.patch b/SOURCES/bz1265425-01-Fix-for-crm_node-l-output-change.patch new file mode 100644 index 0000000..9b3a454 --- /dev/null +++ b/SOURCES/bz1265425-01-Fix-for-crm_node-l-output-change.patch @@ -0,0 +1,30 @@ +From 2d28901bb2eac1329e935b0d7f8418a27c0b0067 Mon Sep 17 00:00:00 2001 +From: Chris Feist +Date: Tue, 22 Sep 2015 17:19:37 -0500 +Subject: [PATCH] Fix for crm_node -l output change + +- crm_node -l now outputs a status after the node id and node name we + now ignore lines where the 3rd field is "lost". 
+--- + pcs/utils.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/pcs/utils.py b/pcs/utils.py +index 0b8d03f..88362b3 100644 +--- a/pcs/utils.py ++++ b/pcs/utils.py +@@ -1706,8 +1706,9 @@ def getPacemakerNodesID(allow_failure=False): + + pm_nodes = {} + for line in output.rstrip().split("\n"): +- node_info = line.rstrip().split(" ",1) +- pm_nodes[node_info[0]] = node_info[1] ++ node_info = line.rstrip().split(" ") ++ if len(node_info) <= 2 or node_info[2] != "lost": ++ pm_nodes[node_info[0]] = node_info[1] + + return pm_nodes + +-- +1.9.1 + diff --git a/SOURCES/bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch b/SOURCES/bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch new file mode 100644 index 0000000..72a7b03 --- /dev/null +++ b/SOURCES/bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch @@ -0,0 +1,33 @@ +From fedadee0788fc4841bd4b2df03cabd35c57d0f2d Mon Sep 17 00:00:00 2001 +From: Chris Feist +Date: Tue, 6 Oct 2015 15:54:25 -0500 +Subject: [PATCH] Fixed issue with 'resource manage' not removing meta + attribute from clones or masters + +--- + pcs/resource.py | 8 +++- + pcs/test/test_resource.py | 108 ++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 114 insertions(+), 2 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index 4c4b8ee..e50e20b 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -2349,11 +2349,15 @@ def resource_manage(argv, set_managed): + if retval != 0: + utils.err("error attempting to unmanage resource: %s" % output) + else: +- xpath = "(//primitive|//group)[@id='"+resource+"']/meta_attributes/nvpair[@name='is-managed']" ++ # Remove the meta attribute from the id specified ++ xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']/meta_attributes/nvpair[@name='is-managed']" ++ utils.run(["cibadmin", "-D", "--xpath", xpath]) ++ # Remove the meta attribute from the parent of the id specified, if the parent is a clone or master ++ xpath = "(//master|//clone)[primitive[contains(@id, '"+resource+"')]]/meta_attributes/nvpair[@name='is-managed']" + utils.run(["cibadmin", "-D", "--xpath", xpath]) + if isGroup: + for res in res_to_manage: +- xpath = "(//primitive|//group)[@id='"+res+"']/meta_attributes/nvpair[@name='is-managed']" ++ xpath = "(//primitive|//group|//clone|//master)[@id='"+res+"']/meta_attributes/nvpair[@name='is-managed']" + utils.run(["cibadmin", "-D", "--xpath", xpath]) + + def is_managed(resource_id): diff --git a/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch b/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch new file mode 100644 index 0000000..9577232 --- /dev/null +++ b/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch @@ -0,0 +1,26 @@ +From ecef0b8d8be8f20225351b5af4448104937b2fea Mon Sep 17 00:00:00 2001 +From: Chris Feist +Date: Wed, 7 Oct 2015 17:54:13 -0500 +Subject: [PATCH] Remove all is-managed meta attributes when managing a + resource + +--- + pcs/resource.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index 7f2d4c3..c1c5f50 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -2351,7 +2351,7 @@ def resource_manage(argv, set_managed): + else: + # Remove the meta attribute from the id specified (and all children) + xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']//meta_attributes/nvpair[@name='is-managed']" +- utils.run(["cibadmin", "-D", 
"--xpath", xpath]) ++ utils.run(["cibadmin", "-d", "--xpath", xpath, "--force"]) + # Remove the meta attribute from the parent of the id specified, if the parent is a clone or master + xpath = "(//master|//clone)[(group|primitive)[@id='"+resource+"']]/meta_attributes/nvpair[@name='is-managed']" + utils.run(["cibadmin", "-D", "--xpath", xpath]) +-- +2.4.3 + diff --git a/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch b/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch new file mode 100644 index 0000000..ccb6efd --- /dev/null +++ b/SOURCES/bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch @@ -0,0 +1,31 @@ +From 82f9be59e55c5e5fa01242fb7da59e3610da3674 Mon Sep 17 00:00:00 2001 +From: Chris Feist +Date: Wed, 7 Oct 2015 09:11:43 -0500 +Subject: [PATCH] Fixes for managing special cases of unmanaged resources + +--- + pcs/resource.py | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/pcs/resource.py b/pcs/resource.py +index e50e20b..7f2d4c3 100644 +--- a/pcs/resource.py ++++ b/pcs/resource.py +@@ -2349,11 +2349,11 @@ def resource_manage(argv, set_managed): + if retval != 0: + utils.err("error attempting to unmanage resource: %s" % output) + else: +- # Remove the meta attribute from the id specified +- xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']/meta_attributes/nvpair[@name='is-managed']" ++ # Remove the meta attribute from the id specified (and all children) ++ xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']//meta_attributes/nvpair[@name='is-managed']" + utils.run(["cibadmin", "-D", "--xpath", xpath]) + # Remove the meta attribute from the parent of the id specified, if the parent is a clone or master +- xpath = "(//master|//clone)[primitive[contains(@id, '"+resource+"')]]/meta_attributes/nvpair[@name='is-managed']" ++ xpath = "(//master|//clone)[(group|primitive)[@id='"+resource+"']]/meta_attributes/nvpair[@name='is-managed']" + utils.run(["cibadmin", "-D", "--xpath", xpath]) + if isGroup: + for res in res_to_manage: +-- +2.4.3 + diff --git a/SOURCES/bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch b/SOURCES/bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch new file mode 100644 index 0000000..a2e4508 --- /dev/null +++ b/SOURCES/bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch @@ -0,0 +1,172 @@ +From 35bb4addbc04e8a8dea26aa2099d852ce084ec14 Mon Sep 17 00:00:00 2001 +From: Tomas Jelinek +Date: Wed, 21 Oct 2015 10:46:17 +0200 +Subject: [PATCH] fix setting cluster properties in web UI + +- set the properties correctly even if it is not possible to load the + properties' current values +- do not depend on cluster being imported in pcsd when loading the + properties +- fix loading default values of cluster properties +--- + pcsd/pcsd.rb | 39 +++++++++++++++----------------- + pcsd/remote.rb | 11 ++++++--- + pcsd/settings.rb | 1 + + pcsd/settings.rb.i386-linux-gnu.debian | 1 + + pcsd/settings.rb.x86_64-linux-gnu.debian | 1 + + 5 files changed, 29 insertions(+), 24 deletions(-) + +diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb +index b7c2a49..c42abb8 100644 +--- a/pcsd/pcsd.rb ++++ b/pcsd/pcsd.rb +@@ -573,7 +573,7 @@ if not DISABLE_GUI + end + @resource_agents = get_resource_agents_avail(session) + @stonith_agents = get_stonith_agents_avail(session) +- @config_options = getConfigOptions2(session, @cluster_name) ++ @config_options = getConfigOptions2(session, @nodes) + + erb :nodes, :layout => :main + end +@@ -895,7 +895,7 @@ def 
getLocationDeps(session, cur_node) + [deps_allow, deps_disallow] + end + +-def getConfigOptions2(session, cluster_name) ++def getConfigOptions2(session, cluster_nodes) + config_options = {} + general_page = [] + # general_page << ConfigOption.new("Cluster Delay Time", "cluster-delay", "int", 4, "Seconds") +@@ -933,7 +933,7 @@ If checked, the cluster will refuse to start resources unless one or more STONIT + allconfigoptions = [] + config_options.each { |i,k| k.each { |j| allconfigoptions << j } } + ConfigOption.getDefaultValues(allconfigoptions) +- ConfigOption.loadValues(session, allconfigoptions, cluster_name) ++ ConfigOption.loadValues(session, allconfigoptions, cluster_nodes) + return config_options + end + +@@ -1005,16 +1005,8 @@ class ConfigOption + @desc = desc + end + +- def self.loadValues(session, cos, cluster_name, node_list=nil) +- if node_list +- code, output = send_nodes_request_with_token( +- session, node_list, "get_cib" +- ) +- else +- code, output = send_cluster_request_with_token( +- session, cluster_name, "get_cib" +- ) +- end ++ def self.loadValues(session, cos, node_list) ++ code, output = send_nodes_request_with_token(session, node_list, "get_cib") + $logger.info(code) + if code != 200 + $logger.info "Error: unable to load cib" +@@ -1037,14 +1029,19 @@ class ConfigOption + end + + def self.getDefaultValues(cos) +- metadata = `#{PENGINE} metadata` +- doc = REXML::Document.new(metadata) +- +- cos.each { |co| +- doc.elements.each("resource-agent/parameters/parameter[@name='#{co.configname}']/content") { |e| +- co.default = e.attributes["default"] +- break +- } ++ [PENGINE, CIB_BINARY].each { |command| ++ metadata = `#{command} metadata` ++ begin ++ doc = REXML::Document.new(metadata) ++ cos.each { |co| ++ doc.elements.each("resource-agent/parameters/parameter[@name='#{co.configname}']/content") { |e| ++ co.default = e.attributes["default"] ++ break ++ } ++ } ++ rescue ++ $logger.error("Failed to parse #{command} metadata") ++ end + } + end + +diff --git a/pcsd/remote.rb b/pcsd/remote.rb +index e65c8ac..dc90fc9 100644 +--- a/pcsd/remote.rb ++++ b/pcsd/remote.rb +@@ -1921,9 +1921,13 @@ def update_cluster_settings(params, request, session) + binary_settings = [] + changed_settings = [] + old_settings = {} +- getConfigOptions2(PCSAuth.getSuperuserSession(), $cluster_name).values().flatten().each { |opt| ++ getConfigOptions2( ++ PCSAuth.getSuperuserSession(), get_nodes().flatten() ++ ).values().flatten().each { |opt| ++ binary_settings << opt.configname if "check" == opt.type ++ # if we don't know current value of an option, consider it changed ++ next if opt.value.nil? + if "check" == opt.type +- binary_settings << opt.configname + old_settings[opt.configname] = is_cib_true(opt.value) + else + old_settings[opt.configname] = opt.value +@@ -1931,6 +1935,7 @@ def update_cluster_settings(params, request, session) + } + settings.each { |key, val| + new_val = binary_settings.include?(key) ? 
is_cib_true(val) : val ++ # if we don't know current value of an option, consider it changed + if (not old_settings.key?(key)) or (old_settings[key] != new_val) + changed_settings << key.downcase() + end +@@ -1940,7 +1945,7 @@ def update_cluster_settings(params, request, session) + return 403, 'Permission denied' + end + end +- if changed_settings.count { |x| x != 'enable-acl'} > 0 ++ if changed_settings.count { |x| x != 'enable-acl' } > 0 + if not allowed_for_local_cluster(session, Permissions::WRITE) + return 403, 'Permission denied' + end +diff --git a/pcsd/settings.rb b/pcsd/settings.rb +index 4cea800..ff056a4 100644 +--- a/pcsd/settings.rb ++++ b/pcsd/settings.rb +@@ -10,6 +10,7 @@ HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" + PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" + NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/' + PENGINE = "/usr/libexec/pacemaker/pengine" ++CIB_BINARY = '/usr/libexec/pacemaker/cib' + CRM_MON = "/usr/sbin/crm_mon" + CRM_NODE = "/usr/sbin/crm_node" + CRM_ATTRIBUTE = "/usr/sbin/crm_attribute" +diff --git a/pcsd/settings.rb.i386-linux-gnu.debian b/pcsd/settings.rb.i386-linux-gnu.debian +index 6366651..4db23e4 100644 +--- a/pcsd/settings.rb.i386-linux-gnu.debian ++++ b/pcsd/settings.rb.i386-linux-gnu.debian +@@ -7,6 +7,7 @@ OCF_ROOT = "/usr/lib/ocf" + HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" + PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" + PENGINE = "/usr/lib/i386-linux-gnu/pacemaker/pengine" ++CIB_BINARY = '/usr/lib/i386-linux-gnu/pacemaker/cib' + CRM_NODE = "/usr/sbin/crm_node" + CRM_ATTRIBUTE = "/usr/sbin/crm_attribute" + COROSYNC_BINARIES = "/usr/sbin/" +diff --git a/pcsd/settings.rb.x86_64-linux-gnu.debian b/pcsd/settings.rb.x86_64-linux-gnu.debian +index 23a71ab..3f6d5c0 100644 +--- a/pcsd/settings.rb.x86_64-linux-gnu.debian ++++ b/pcsd/settings.rb.x86_64-linux-gnu.debian +@@ -7,6 +7,7 @@ OCF_ROOT = "/usr/lib/ocf" + HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/" + PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/" + PENGINE = "/usr/lib/x86_64-linux-gnu/pacemaker/pengine" ++CIB_BINARY = '/usr/lib/x86_64-linux-gnu/pacemaker/cib' + CRM_NODE = "/usr/sbin/crm_node" + CRM_ATTRIBUTE = "/usr/sbin/crm_attribute" + COROSYNC_BINARIES = "/usr/sbin/" +-- +1.9.1 + diff --git a/SOURCES/secure-cookie.patch b/SOURCES/secure-cookie.patch deleted file mode 100644 index d7d802f..0000000 --- a/SOURCES/secure-cookie.patch +++ /dev/null @@ -1,22 +0,0 @@ ---- pcs-0.9.137/pcsd/pcsd.rb.secure_fix 2015-03-30 13:48:50.209887370 -0500 -+++ pcs-0.9.137/pcsd/pcsd.rb 2015-03-30 13:50:47.321660377 -0500 -@@ -32,7 +32,9 @@ end - - use Rack::Session::Cookie, - :expire_after => 60 * 60, -- :secret => secret -+ :secret => secret, -+ :secure => true, # only send over HTTPS -+ :httponly => true # don't provide to javascript - - #use Rack::SSL - -@@ -46,8 +48,6 @@ also_reload 'pcs.rb' - also_reload 'auth.rb' - also_reload 'wizard.rb' - --enable :sessions -- - before do - $session = session - $cookies = cookies diff --git a/SPECS/pcs.spec b/SPECS/pcs.spec index daf1e22..bb6ed76 100644 --- a/SPECS/pcs.spec +++ b/SPECS/pcs.spec @@ -1,130 +1,81 @@ -%bcond_without clufter -%if %{with clufter} -%{!?clufter_name: %global clufter_name clufter} -%{!?clufter_pkg_name:%global clufter_pkg_name python-clufter} -%{!?clufter_version: %global clufter_version 0.3.0} -%{!?clufter_source: %global clufter_source %{clufter_name}-%{clufter_version}} -%{!?clufter_script: %global clufter_script 
%{_libexecdir}/%{clufter_name}} -%{!?clufter_bashcomp:%global clufter_bashcomp %{_sysconfdir}/bash_completion.d/%(basename "%{clufter_script}")} -%{!?clufter_check: %global clufter_check 1} - -%{!?clufter_ccs_flatten: %global clufter_ccs_flatten %{_libexecdir}/%{clufter_source}/ccs_flatten} -%{!?clufter_editor: %global clufter_editor %{_bindir}/nano} -%{!?clufter_ra_metadata_dir: %global clufter_ra_metadata_dir %{_datadir}/cluster} -%{!?clufter_ra_metadata_ext: %global clufter_ra_metadata_ext metadata} -%endif - Name: pcs -Version: 0.9.137 -Release: 13%{?dist}.4 +Version: 0.9.143 +Release: 15%{?dist} License: GPLv2 URL: http://github.com/feist/pcs Group: System Environment/Base #BuildArch: x86_64 BuildRequires: python2-devel Summary: Pacemaker Configuration System -Source0: http://people.redhat.com/cfeist/pcs/pcs-withgems-%{version}.tar.gz +Source0: https://tojeline.fedorapeople.org/pkgs/pcs/pcs-withgems-%{version}.tar.gz Source1: HAM-logo.png -Patch0: bz1115537-Improve-error-messages-for-scoped-cib-operations.patch -Patch1: bz1156311-Fix-waiting-for-resource-operations.patch -Patch2: bz1170150-Fix-displaying-globally-unique-clones-in-GUI.patch -Patch3: bz1054491-Fix-acl-add-duplicate-names-and-remove-roles-in-GUI.patch -Patch4: bz1179023-Added-support-for-resource-discovery-on-location-con.patch -Patch5: bz1054491-Delete-a-user-group-when-deleting-its-last-role-in-GUI.patch -Patch6: bz1179023-Added-support-for-resource-discovery-on-location-con-2.patch -Patch7: bz1054491-Add-acl-enable-and-disable-commands-3.patch -Patch8: bz1180390-Stop-deleted-resource-before-removing-its-constraint.patch -Patch9: bz1180506-stop-cluster-nodes-in-parallel.patch -Patch10: bz1180506-Warn-if-nodes-stop-will-cause-a-loss-of-the-quorum.patch -Patch11: bz1180506-3-Keep-cluster-quorate-during-destruction-as-long-as-possible.patch -Patch12: bz1205848-Do-not-set-two_node-in-corosync-if-auto_tie_breaker-is-on.patch -Patch13: secure-cookie.patch -Patch14: bz1218478-fix-cluster-property-name-validation.patch -Patch15: bz1253289-fixed-session-and-cookies-processing.patch -Patch16: bz1253293-fixed-command-injection-vulnerability.patch - -# NOTE: Source20 and Patch200+ belong to python-clufter +Patch0: bz1122818-01-fix-resource-relocation-of-globally-unique-clones.patch +Patch1: bz1158577-01-improve-logging-in-pcsd.patch +Patch2: bz1189857-01-fix-Add-Resource-form-in-web-UI.patch +Patch3: bz1235022-01-add-nagios-support-to-pcs-resource-list-and-web-UI.patch +Patch4: bz1122818-02-fix-resource-relocate-for-remote-nodes.patch +Patch5: bz1253491-01-fix-pcs-pcsd-path-detection.patch +Patch6: bz1253294-01-fixed-command-injection-vulnerability.patch +Patch7: bz1258619-01-fix-ruby-traceback-on-pcsd-startup.patch +Patch8: bz1158577-02-fix-certificates-syncing.patch +Patch9: bz1189857-02-fix-tree-view-of-resources-in-web-UI.patch +Patch10: bz1158566-01-fix-dashboard-in-web-UI.patch +Patch11: bz1189857-03-web-UI-prevents-running-update-multiple-times-at-onc.patch +Patch12: bz1189857-04-fix-constraints-removing-in-web-UI.patch +Patch13: bz1158571-01-web-UI-mark-unsaved-permissions-forms.patch +Patch14: bz1189857-05-remove-removing-constriants-from-client-side-javascr.patch +Patch15: bz1235022-02-fix-crash-when-missing-nagios-metadata.patch +Patch16: bz1158571-02-check-and-refresh-user-auth-info-upon-each-request.patch +Patch17: bz1257369-01-always-print-output-of-crm_resource-cleanup.patch +Patch18: bz1158566-02-fix-loading-cluster-status-for-web-UI.patch +Patch19: bz1158569-01-fixed-a-typo-in-an-error-message.patch +Patch20: 
bz1158571-03-fix-checking-user-s-group-membership.patch +Patch21: bz1188361-01-Make-port-parameter-of-fence-agents-optional.patch +Patch22: bz1158569-02-fix-authentication-in-web-UI.patch +Patch23: bz1158566-03-web-UI-multiple-fixes-in-the-dashboard.patch +Patch24: bz1198640-01-web-UI-allows-spaces-in-optional-arguments-when-crea.patch +Patch25: bz1189857-06-web-UI-fixes-in-nodes-resources-fence-devices.patch +Patch26: bz1245264-01-Added-more-detailed-warnings-for-pcs-stonith-confirm.patch +Patch27: bz1189857-07-web-UI-fixes.patch +Patch28: bz1265425-01-Fix-for-crm_node-l-output-change.patch +Patch29: bz1268801-Fixed-issue-with-resource-manage-not-removing-meta-a.patch +Patch30: bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour.patch +Patch31: bz1268801-Fixes-for-managing-special-cases-of-unmanaged-resour-2.patch +Patch32: bz1272412-01-fix-setting-cluster-properties-in-web-UI.patch BuildRequires: ruby >= 2.0.0 ruby-devel rubygems pam-devel git BuildRequires: systemd-units rubygem-bundler Requires(post): systemd Requires(preun): systemd Requires(postun): systemd -Requires: pacemaker-cli corosync ruby >= 2.0.0 pacemaker -%if %{with clufter} -Requires: %{clufter_pkg_name} -%endif +Requires: pacemaker-cli corosync ruby >= 2.0.0 pacemaker python-clufter +Requires: psmisc initscripts openssl + +Provides: bundled(rubygem-backports) = 3.6.4 +Provides: bundled(rubygem-eventmachine) = 1.0.7 +Provides: bundled(rubygem-monkey-lib) = 0.5.4 +Provides: bundled(rubygem-multi_json) = 1.11.1 +Provides: bundled(rubygem-open4) = 1.3.4 +Provides: bundled(rubygem-orderedhash) = 0.0.6 +Provides: bundled(rubygem-rack) = 1.6.4 +Provides: bundled(rubygem-rack-protection) = 1.5.3 +Provides: bundled(rubygem-rack-test) = 0.6.3 +Provides: bundled(rubygem-rpam-ruby19) = 1.2.1 +Provides: bundled(rubygem-sinatra) = 1.4.6 +Provides: bundled(rubygem-sinatra-contrib) = 1.4.4 +Provides: bundled(rubygem-sinatra-sugar) = 0.5.1 +Provides: bundled(rubygem-tilt) = 1.4.1 %description pcs is a corosync and pacemaker configuration tool. It permits users to easily view, modify and created pacemaker based clusters. - -# subpackage metadata begin -%if %{with clufter} -%package -n %{clufter_pkg_name} -Group: System Environment/Base -Summary: Tool/library for transforming/analyzing cluster configuration formats -License: GPLv2+ -URL: https://github.com/jnpkrn/%{clufter_name} -# clufter as such module ccs_flatten -BuildRequires: python-setuptools -%if %{clufter_check} -BuildRequires: python-lxml -%endif -Requires: python-lxml -# ccs_flatten -BuildRequires: libxml2-devel -Requires: libxml2 -# "extras" -Requires: %{clufter_editor} -Source20: https://people.redhat.com/jpokorny/pkgs/%{clufter_name}/%{clufter_source}.tar.gz - -%description -n %{clufter_pkg_name} -While primarily aimed at (CMAN,rgmanager)->(Corosync/CMAN,Pacemaker) cluster -stacks configuration conversion (as per RHEL trend), the command-filter-format -framework (capable of XSLT) offers also other uses through its plugin library. 
-# subpackage metadata end -%endif - - %prep -%if %{with clufter} -%autosetup -a20 -p1 -S git - -# for some esoteric reason, the line above has to be empty -ln -s "%{clufter_source}" "%{clufter_name}" -%else %autosetup -p1 -S git -# ditto as previous comment -%endif cp -f %SOURCE1 pcsd/public/images -%if %{with clufter} -pushd "%{clufter_name}" >/dev/null -%{__python} setup.py saveopts -f setup.cfg pkg_prepare \ - --ccs-flatten="%{clufter_ccs_flatten}" \ - --editor="%{clufter_editor}" \ - --ra-metadata-dir="%{clufter_ra_metadata_dir}" \ - --ra-metadata-ext="%{clufter_ra_metadata_ext}" -popd >/dev/null -%endif - %build -%if %{with clufter} -pushd "%{clufter_name}" >/dev/null -%{__python} setup.py build -%if "x%{clufter_script}" == "x" -%else -%if "x%{clufter_bashcomp}" == "x" -%else -./run-dev --completion-bash \ - | sed 's|run[-_]dev|%(basename %{clufter_bashcomp})|g' > .bashcomp -%endif -%endif -popd >/dev/null -%endif %install rm -rf $RPM_BUILD_ROOT @@ -136,63 +87,9 @@ chmod 755 $RPM_BUILD_ROOT/%{python_sitelib}/pcs/pcs.py # Temporary fix for ruby-2.0.0 and rpam #cp $RPM_BUILD_ROOT/usr/lib/pcsd/gemhome/gems/rpam-ruby19-1.2.1/ext/Rpam/rpam_ext.so $RPM_BUILD_ROOT/usr/lib/pcsd/gemhome/gems/rpam-ruby19-1.2.1/lib -%if %{with clufter} -pushd "%{clufter_name}" >/dev/null -# '--root' implies setuptools involves distutils to do old-style install -%{__python} setup.py install --skip-build --root "%{buildroot}" -%if "x%{clufter_script}" == "x" -%else -# %{_bindir}/%{clufter_name} should have been created -# by install_scripts of setuptools; this hiding from PATH is for TP only -%{__mkdir_p} "%{buildroot}$(dirname "%{clufter_script}")" -%{__mv} -- "%{buildroot}%{_bindir}/%{clufter_name}" "%{buildroot}%{clufter_script}" -%if "x%{clufter_bashcomp}" == "x" -%else -%{__mkdir_p} "$(dirname "%{clufter_bashcomp}")" -%{__install} -- .bashcomp "%{buildroot}%{clufter_bashcomp}" -%endif -%endif -%{__mkdir_p} "%{buildroot}%{_defaultdocdir}/%{clufter_source}" -%{__install} -m 644 -- gpl-2.0.txt doc/*.txt "%{buildroot}%{_defaultdocdir}/%{clufter_source}" -popd >/dev/null -%endif - -%check || : -%if %{with clufter} -%if %{clufter_check} -# just a basic sanity check -pushd "%{clufter_name}" >/dev/null -# we need to massage RA metadata files and PATH so the local run works -# XXX we could also inject buildroot's site_packages dir to PYTHONPATH -declare ret=0 ccs_flatten_dir="$(dirname "%{buildroot}%{clufter_ccs_flatten}")" - -ln -s "%{buildroot}%{clufter_ra_metadata_dir}"/*."%{clufter_ra_metadata_ext}" \ - "${ccs_flatten_dir}" -PATH="${PATH:+${PATH}:}$(dirname "%{buildroot}%{clufter_ccs_flatten}")" \ -./run-check -ret=$? 
-%{__rm} -f -- "${ccs_flatten_dir}"/*."%{clufter_ra_metadata_ext}" -popd >/dev/null -[ ${ret} = 0 ] || exit "${ret}" -%endif -%endif - %post %systemd_post pcsd.service -%if %{with clufter} -%post -n %{clufter_pkg_name} -%if "x%{clufter_bashcomp}" == "x" -%else -%if "x%{clufter_script}" == "x" -%{__python} -m %{clufter_name}.__main__ --completion-bash 2>/dev/null \ - | sed 's|%(basename "%{__python}") [-_]m ||g' > "%{clufter_bashcomp}" || : -%else -%{clufter_script} --completion-bash > "%{clufter_bashcomp}" 2>/dev/null || : -%endif -%endif -%endif - %preun %systemd_preun pcsd.service @@ -206,7 +103,6 @@ popd >/dev/null /usr/sbin/pcs /usr/lib/pcsd/* /usr/lib/pcsd/.bundle/config -/usr/lib/pcsd/.gitignore /usr/lib/systemd/system/pcsd.service /var/lib/pcsd /etc/pam.d/pcsd @@ -215,46 +111,164 @@ popd >/dev/null %dir /var/log/pcsd /etc/sysconfig/pcsd %{_mandir}/man8/pcs.* +%exclude /usr/lib/pcsd/*.debian %doc COPYING README -%if %{with clufter} -%files -n %{clufter_pkg_name} -%defattr(-,root,root,-) -%{python2_sitelib}/%{clufter_name} -%{python2_sitelib}/%{clufter_name}-%{clufter_version}-*.egg-info -%{clufter_ccs_flatten} -%{clufter_ra_metadata_dir} - -%if "x%{clufter_script}" == "x" -%else -%if "x%{clufter_bashcomp}" == "x" -%else -%verify(not size md5 mtime) %{clufter_bashcomp} -%endif -%{clufter_script} -%endif -%doc %{_defaultdocdir}/%{clufter_source}/* -%endif - - %changelog -* Fri Aug 14 2015 Tomas Jelinek - 0.9.137-13.el7_1.4 -- Fixed session and cookies processing +* Wed Oct 21 2015 Tomas Jelinek - 0.9.143-15 +- Fixed setting cluster properties in web UI +- Resolves: rhbz#1272412 + +* Wed Oct 07 2015 Chris Feist - 0.9.143-14 +- Fixed remaining issues when managing resources/groups/etc. that were + previously unmanaged +- Resolves: rhbz#1268801 + +* Tue Oct 06 2015 Chris Feist - 0.9.143-12 +- Fixed issue managing resources that were clones and had the unmanaged + meta attribute set under the clone/master +- Resolves: rhbz#1268801 + +* Wed Sep 23 2015 Tomas Jelinek - 0.9.143-11 +- Fix for crm_node -l output change +- Resolves: rhbz#1265425 + +* Tue Sep 22 2015 Tomas Jelinek - 0.9.143-10 +- Web UI fixes +- Added more detailed warnings for 'pcs stonith confirm' +- Resolves: rhbz#1189857 rhbz#1245264 + +* Wed Sep 16 2015 Tomas Jelinek - 0.9.143-9 +- Multiple fixes in web UI (dashboard, nodes, resources, fence devices) +- Fixed an authentication issue in web UI +- Port parameter of fence agents is now considered optional +- Resolves: rhbz#1158566 rhbz#1188361 rhbz#1189857 + +* Tue Sep 08 2015 Tomas Jelinek - 0.9.143-8 +- Fixes in loading cluster status for web UI +- Fixed checking user/group membership +- Fixed a typo in an error message +- Resolves: #rhbz1158566 #rhbz1158569 #rhbz1158571 + +* Mon Sep 07 2015 Tomas Jelinek - 0.9.143-7 +- Multiple fixes in web UI +- Fixed crash on missing nagios agents metadata +- Check user/group membership on each request +- Print output of crm_resource in pcs resource cleanup +- Resolves: #rhbz1158571 #rhbz1189857 #rhbz1235022 #rhbz1257369 + +* Tue Sep 01 2015 Tomas Jelinek - 0.9.143-6 +- Added missing dependency on openssl +- Resolves: #rhbz1158577 + +* Tue Sep 01 2015 Tomas Jelinek - 0.9.143-5 +- Fixed pcsd certificates synchronization +- Multiple fixes in web UI +- Resolves: #rhbz1158566 #rhbz1158577 #rhbz1189857 + +* Mon Aug 31 2015 Chris Feist - 0.9.143-4 +- Fixed issue causing traceback on pcsd stop +- Resolves: #rhbz#1258619 + +* Wed Aug 26 2015 Tomas Jelinek - 0.9.143-3 +- Fixed relocation of remote nodes to their optimal node +- Fixed pcs/pcsd path 
detection - Fixed command injection vulnerability -- Resolves: rhbz#1253289 rhbz#1253293 - -* Wed Jun 10 2015 Tomas Jelinek - 0.9.137-13.el7_1.3 -- Fixed cluster property name validation -- Resolves: rhbz#1229868 - -* Wed Apr 15 2015 Tomas Jelinek - 0.9.137-13.el7_1.2 +- Resolves: #rhbz1122818 #rhbz1253294 #rhbz1253491 + +* Fri Aug 14 2015 Tomas Jelinek - 0.9.143-2 +- Fixed relocation of unique clone resources to their optimal node +- Improved logging of node to node communication +- Fixed 'Add resource' form in web UI +- Fixed support for nagios agents +- Resolves: rhbz#1122818 rhbz#1158577 rhbz#1189857 rhbz#1235022 + +* Mon Aug 10 2015 Tomas Jelinek - 0.9.143-1 +- Added support for setting permissions for users and groups to clusters managed by web UI +- Resources are now displayed in a tree (clone, master/slave, group, primitive) in web UI +- Renamed 'pcs resource relocate clean' command to 'pcs resource relocate clear' +- Improved logging of config files synchronization +- Various fixes in Resources tab in web UI +- Added missing dependency on initscripts to the spec file +- Fixed traceback when running 'pcs resource enable clvmd --wait' +- Resolves: rhbz#1122818 rhbz#1158571 rhbz#1158577 rhbz#1182119 rhbz#1189857 rhbz#1198640 rhbz#1219574 rhbz#1243579 rhbz#1247818 rhbz#1250720 + +* Fri Jul 10 2015 Chris Feist - 0.9.142-2 +- Cleaned up tarball + +* Thu Jul 09 2015 Chris Feist - 0.9.142-1 +- Rebase to latest upstream sources +- Added ability to set hostname when using IP address to create a cluster +- Added ability to clear out tokens with pcs pcsd clear-auth +- Added ability to use nagios agents +- Fixed issue with orphaned resources causing GUI to fail to work properly +- More dashboard fixes +- Synchronize files between pcsd instances in a cluster to allow for HA pcsd +- ACL role fixes for pcs/pcsd +- Resolves: rhbz#118310 rhbz#1207805 rhbz#1235022 rhbz#1198222 rhbz#1158566 rhbz#1158577 rhbz#1166160 + +* Tue Jun 23 2015 Tomas Jelinek - 0.9.141-1 +- Rebased to latest upstream packages +- Added a command to relocate resources to their preferred host +- Fixed the dashboard in web UI +- Configure corosync to log to a file +- Added warning when creating a duplicate resource operation +- Added support for debugging resource agents +- Do not automatically use --force when removing a resource using web UI +- Fixed pcsd communication when one of the nodes is not authenticated +- Updated ruby gems +- Spec file fixes +- Resolves: rhbz#1198265 rhbz#1122818 rhbz#1158566 rhbz#1163671 rhbz#1175400 rhbz#1185096 rhbz#1198274 rhbz#1213429 rhbz#1231987 rhbz#1232644 rhbz#1233574 + +* Wed Jun 03 2015 Tomas Jelinek - 0.9.140-1 +- Rebased to latest upstream packages +- Added a note to man page and help pointing to cluster properties description +- Fixed parsing of the corosync.conf file +- Fixed differences between the 'pcs cluster status' and 'pcs status cluster' commands as one is documented to be an alias of the other +- Do not remove constraints referencing a group when removing a resource from the group +- Added dashboard showing status of clusters to web UI +- Added node authentication dialog to web UI +- Added synchronization of web UI configuration files across cluster nodes +- Fixed node authentication when one of the nodes is unreachable +- Fixed an error message in the 'pcs config restore' command if a node is not authenticated +- Fixed parsing of 'pcs acl role create' command's parameters +- Properly overwrite a tokens file if its contents are unparsable +- The 'pcs config' command now displays 
resources defaults and operations defaults +- Show a useful error message when attempting to add a duplicate fence level in web UI +- Added the require-all parameter to ordering constraints listing +- Fixed VirtualDomain resource removal when there are constraints for the resource +- Added a warning when removing a cluster node may cause a loss of the quorum +- Fixed an error when uncloning a non-cloned resource +- Fixed an error when removing a resource from a cloned group +- Fixed waiting for resource commands to finish +- Fixed 'pcs cluster start' and similar commands when run under a non-root account +- Fixed parsing of 'pcs constraint order set' command's parameters +- Fixed an error when creating a resource with an id which already exists +- Improved man page and help for the 'pcs resource move' and 'pcs resource ban' commands +- Fixed an error when referencing a non-existing acl role in 'pcs acl' commands +- Fixed an error when adding an invalid stonith level +- Fixed constraints removal and node standby / unstandby using remote web UI +- Fixed formatting of resource / fence agent description +- Fence agent description now contains information about the agent +- The 'pcs status --full' command now displays node attributes and migration summary +- Clufter moved to a standalone package +- Fixed pcsd communication when one of the nodes is not authenticated +- Fixed a timeout value in the fence_xvm agent form +- Fixed the 'pcs resource enable' command when working with clones and multi-state resources +- Resolves: rhbz#1198265 rhbz#1121791 rhbz#1134426 rhbz#1158491 rhbz#1158537 rhbz#1158566 rhbz#1158569 rhbz#1158577 rhbz#1163682 rhbz#1165803 rhbz#1166160 rhbz#1170205 rhbz#1176687 rhbz#1182793 rhbz#1182986 rhbz#1183752 rhbz#1186692 rhbz#1187320 rhbz#1187571 rhbz#1188571 rhbz#1196412 rhbz#1197758 rhbz#1199073 rhbz#1201452 rhbz#1202457 rhbz#1204880 rhbz#1205653 rhbz#1206214 rhbz#1206219 rhbz#1206223 rhbz#1212904 rhbz#1213429 rhbz#1215198 rhbz#1218979 + +* Tue Jun 02 2015 Tomas Jelinek - 0.9.137-16 +- Fixes cluster property name validation +- Resolves: rhbz#1218478 + +* Wed Apr 15 2015 Tomas Jelinek - 0.9.137-15 - Fixes issues with cookie signing in pcsd -- Resolves: rhbz#1211567 +- Resolves: rhbz#1211568 -* Thu Mar 26 2015 Tomas Jelinek - 0.9.137-13.el7_1.1 +* Mon Mar 09 2015 Tomas Jelinek - 0.9.137-14 - Do not set two_nodes=1 in corosync.conf when auto_tie_breaker=1 is set -- Resolves: rhbz#1205848 +- Resolves: rhbz#1197770 * Tue Jan 20 2015 Tomas Jelinek - 0.9.137-13 - Keep cluster quorate during destruction as long as possible