diff --git a/.gitignore b/.gitignore
index ab4a36e..2aa3c88 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
 SOURCES/nagios-agents-metadata-105ab8a.tar.gz
-SOURCES/pacemaker-44eb2ddf8d4f8fc05256aae2abc9fbf3ae4d1fbc.tar.gz
+SOURCES/pacemaker-e174ec8.tar.gz
diff --git a/.pacemaker.metadata b/.pacemaker.metadata
index db08ad9..713bb38 100644
--- a/.pacemaker.metadata
+++ b/.pacemaker.metadata
@@ -1,2 +1,2 @@
 ea6c0a27fd0ae8ce02f84a11f08a0d79377041c3 SOURCES/nagios-agents-metadata-105ab8a.tar.gz
-116bb67b5d40329efa75d7c06a3360e2b7d51413 SOURCES/pacemaker-44eb2ddf8d4f8fc05256aae2abc9fbf3ae4d1fbc.tar.gz
+c0f38a765988fc742462ff1b7ecb4b219b61388f SOURCES/pacemaker-e174ec8.tar.gz
diff --git a/SOURCES/0004-Fix-crm_resource-Correctly-check-if-a-resource-is-un.patch b/SOURCES/0004-Fix-crm_resource-Correctly-check-if-a-resource-is-un.patch
deleted file mode 100644
index 1ef6a11..0000000
--- a/SOURCES/0004-Fix-crm_resource-Correctly-check-if-a-resource-is-un.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From: Andrew Beekhof
-Date: Fri, 14 Aug 2015 09:43:32 +1000
-Subject: [PATCH] Fix: crm_resource: Correctly check if a resource is unmanaged
- or has a target-role
-
-(cherry picked from commit 3ff29dbe2cab872b452c4580736d23d1f69736fa)
----
- tools/crm_resource.c | 2 +-
- tools/crm_resource_runtime.c | 31 ++++++++++++++++++-------------
- 2 files changed, 19 insertions(+), 14 deletions(-)
-
-diff --git a/tools/crm_resource.c b/tools/crm_resource.c
-index 2fce3b7..156bbea 100644
---- a/tools/crm_resource.c
-+++ b/tools/crm_resource.c
-@@ -888,7 +888,7 @@ main(int argc, char **argv)
- rsc = uber_parent(rsc);
- }
-
-- crm_debug("Re-checking the state of %s on %s", rsc_id, host_uname);
-+ crm_debug("Re-checking the state of %s for %s on %s", rsc->id, rsc_id, host_uname);
- if(rsc) {
- crmd_replies_needed = 0;
- rc = cli_resource_delete(cib_conn, crmd_channel, host_uname, rsc, &data_set);
-diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
-index a270cbf..f260e19 100644
---- a/tools/crm_resource_runtime.c
-+++ b/tools/crm_resource_runtime.c
-@@ -616,35 +616,40 @@ cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_
- void
- cli_resource_check(cib_t * cib_conn, resource_t *rsc)
- {
--
-+ int need_nl = 0;
- char *role_s = NULL;
- char *managed = NULL;
- resource_t *parent = uber_parent(rsc);
-
-- find_resource_attr(cib_conn, XML_ATTR_ID, parent->id,
-- XML_TAG_META_SETS, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
-+ find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
-+ NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
-
-- find_resource_attr(cib_conn, XML_ATTR_ID, parent->id,
-- XML_TAG_META_SETS, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
-+ find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
-+ NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
-
-- if(managed == NULL) {
-- managed = strdup("1");
-- }
-- if(crm_is_true(managed) == FALSE) {
-- printf("\n\t*Resource %s is configured to not be managed by the cluster\n", parent->id);
-- }
- if(role_s) {
- enum rsc_role_e role = text2role(role_s);
- if(role == RSC_ROLE_UNKNOWN) {
- // Treated as if unset
-
- } else if(role == RSC_ROLE_STOPPED) {
-- printf("\n\t* The configuration specifies that '%s' should remain stopped\n", parent->id);
-+ printf("\n * The configuration specifies that '%s' should remain stopped\n", parent->id);
-+ need_nl++;
-
- } else if(parent->variant > pe_clone && role != RSC_ROLE_MASTER) {
-- printf("\n\t* The configuration specifies that '%s' should not be promoted\n", parent->id);
-+ printf("\n * The configuration specifies that '%s' should not be promoted\n", parent->id);
-+ need_nl++;
- }
- }
-+
-+ if(managed && crm_is_true(managed) == FALSE) {
-+ printf("%s * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id);
-+ need_nl++;
-+ }
-+
-+ if(need_nl) {
-+ printf("\n");
-+ }
- }
-
- int
diff --git a/SOURCES/0005-Fix-PE-Bug-cl-5247-Imply-resources-running-on-a-cont.patch b/SOURCES/0005-Fix-PE-Bug-cl-5247-Imply-resources-running-on-a-cont.patch
deleted file mode 100644
index cf19707..0000000
--- a/SOURCES/0005-Fix-PE-Bug-cl-5247-Imply-resources-running-on-a-cont.patch
+++ /dev/null
@@ -1,328 +0,0 @@
-From: Andrew Beekhof
-Date: Tue, 18 Aug 2015 10:30:49 +1000
-Subject: [PATCH] Fix: PE: Bug cl#5247 - Imply resources running on a container
- are stopped when the container is stopped
-
-(cherry picked from commit e10eff1902d5b451454e2d467ee337c964f536ab)
----
- lib/pengine/unpack.c | 29 ++++++++++++++++++++---------
- pengine/allocate.c | 17 +++++++++++++++++
- pengine/graph.c | 7 ++++++-
- pengine/test10/bug-rh-1097457.dot | 2 ++
- pengine/test10/bug-rh-1097457.exp | 12 ++++++++++--
- pengine/test10/bug-rh-1097457.summary | 10 +++++-----
- pengine/test10/whitebox-fail1.dot | 1 +
- pengine/test10/whitebox-fail1.exp | 6 +++++-
- pengine/test10/whitebox-fail1.summary | 8 ++++----
- pengine/test10/whitebox-fail2.dot | 1 +
- pengine/test10/whitebox-fail2.exp | 6 +++++-
- pengine/test10/whitebox-fail2.summary | 8 ++++----
- 12 files changed, 80 insertions(+), 27 deletions(-)
-
-diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
-index 106c674..0f83be4 100644
---- a/lib/pengine/unpack.c
-+++ b/lib/pengine/unpack.c
-@@ -44,7 +44,7 @@ CRM_TRACE_INIT_DATA(pe_status);
-
- gboolean unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op,
- enum action_fail_response *failed, pe_working_set_t * data_set);
--static gboolean determine_remote_online_status(node_t * this_node);
-+static gboolean determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node);
-
- static gboolean
- is_dangling_container_remote_node(node_t *node)
-@@ -73,6 +73,8 @@ pe_fence_node(pe_working_set_t * data_set, node_t * node, const char *reason)
- if (is_set(rsc->flags, pe_rsc_failed) == FALSE) {
- crm_warn("Remote node %s will be fenced by recovering container resource %s",
- node->details->uname, rsc->id, reason);
-+ /* node->details->unclean = TRUE; */
-+ node->details->remote_requires_reset = TRUE;
- set_bit(rsc->flags, pe_rsc_failed);
- }
- } else if (is_dangling_container_remote_node(node)) {
-@@ -1157,7 +1159,7 @@ unpack_remote_status(xmlNode * status, pe_working_set_t * data_set)
- if ((this_node == NULL) || (is_remote_node(this_node) == FALSE)) {
- continue;
- }
-- determine_remote_online_status(this_node);
-+ determine_remote_online_status(data_set, this_node);
- }
-
- /* process attributes */
-@@ -1366,7 +1368,7 @@ determine_online_status_fencing(pe_working_set_t * data_set, xmlNode * node_stat
- }
-
- static gboolean
--determine_remote_online_status(node_t * this_node)
-+determine_remote_online_status(pe_working_set_t * data_set, node_t * this_node)
- {
- resource_t *rsc = this_node->details->remote_rsc;
- resource_t *container = NULL;
-@@ -1393,13 +1395,21 @@ determine_remote_online_status(node_t * this_node)
- }
-
- /* Now check all the failure conditions. */
-- if (is_set(rsc->flags, pe_rsc_failed) ||
-- (rsc->role == RSC_ROLE_STOPPED) ||
-- (container && is_set(container->flags, pe_rsc_failed)) ||
-- (container && container->role == RSC_ROLE_STOPPED)) {
-+ if(container && is_set(container->flags, pe_rsc_failed)) {
-+ crm_trace("Remote node %s is set to UNCLEAN. rsc failed.", this_node->details->id);
-+ this_node->details->online = FALSE;
-+ this_node->details->remote_requires_reset = TRUE;
-
-- crm_trace("Remote node %s is set to OFFLINE. node is stopped or rsc failed.", this_node->details->id);
-+ } else if(is_set(rsc->flags, pe_rsc_failed)) {
-+ crm_trace("Remote node %s is set to OFFLINE. rsc failed.", this_node->details->id);
- this_node->details->online = FALSE;
-+
-+ } else if (rsc->role == RSC_ROLE_STOPPED
-+ || (container && container->role == RSC_ROLE_STOPPED)) {
-+
-+ crm_trace("Remote node %s is set to OFFLINE. node is stopped.", this_node->details->id);
-+ this_node->details->online = FALSE;
-+ this_node->details->remote_requires_reset = FALSE;
- }
-
- remote_online_done:
-@@ -3375,7 +3385,8 @@ find_operations(const char *rsc, const char *node, gboolean active_filter,
- continue;
-
- } else if (is_remote_node(this_node)) {
-- determine_remote_online_status(this_node);
-+ determine_remote_online_status(data_set, this_node);
-+
- } else {
- determine_online_status(node_state, this_node, data_set);
- }
-diff --git a/pengine/allocate.c b/pengine/allocate.c
-index c2e56f9..65ae05d 100644
---- a/pengine/allocate.c
-+++ b/pengine/allocate.c
-@@ -1406,6 +1406,23 @@ stage6(pe_working_set_t * data_set)
-
- /* remote-nodes associated with a container resource (such as a vm) are not fenced */
- if (is_container_remote_node(node)) {
-+ /* Guest */
-+ if (need_stonith
-+ && node->details->remote_requires_reset
-+ && pe_can_fence(data_set, node)) {
-+ resource_t *container = node->details->remote_rsc->container;
-+ char *key = stop_key(container);
-+ GListPtr stop_list = find_actions(container->actions, key, NULL);
-+
-+ crm_info("Impliying node %s is down when container %s is stopped (%p)",
-+ node->details->uname, container->id, stop_list);
-+ if(stop_list) {
-+ stonith_constraints(node, stop_list->data, data_set);
-+ }
-+
-+ g_list_free(stop_list);
-+ free(key);
-+ }
- continue;
- }
-
-diff --git a/pengine/graph.c b/pengine/graph.c
-index 3d832f0..a50f15b 100644
---- a/pengine/graph.c
-+++ b/pengine/graph.c
-@@ -697,7 +697,12 @@ stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * dat
- for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
- resource_t *rsc = (resource_t *) lpc->data;
-
-- rsc_stonith_ordering(rsc, stonith_op, data_set);
-+ if(stonith_op->rsc == NULL) {
-+ rsc_stonith_ordering(rsc, stonith_op, data_set);
-+
-+ } else if(stonith_op->rsc != rsc && stonith_op->rsc != rsc->container) {
-+ rsc_stonith_ordering(rsc, stonith_op, data_set);
-+ }
- }
- }
-
-diff --git a/pengine/test10/bug-rh-1097457.dot b/pengine/test10/bug-rh-1097457.dot
-index 666099c..078d177 100644
---- a/pengine/test10/bug-rh-1097457.dot
-+++ b/pengine/test10/bug-rh-1097457.dot
-@@ -49,10 +49,12 @@ digraph "g" {
- "VM2_start_0 lama3" [ style=bold color="green" fontcolor="black"]
- "VM2_stop_0 lama3" -> "FAKE4-IP_stop_0 lamaVM2" [ style = bold]
- "VM2_stop_0 lama3" -> "FAKE4_stop_0 lamaVM2" [ style = bold]
-+"VM2_stop_0 lama3" -> "FAKE6-clone_stop_0" [ style = bold]
- "VM2_stop_0 lama3" -> "FAKE6_stop_0 lamaVM2" [ style = bold]
- "VM2_stop_0 lama3" -> "FSlun3_stop_0 lamaVM2" [ style = bold]
- "VM2_stop_0 lama3" -> "VM2_start_0 lama3" [ style = bold]
- "VM2_stop_0 lama3" -> "all_stopped" [ style = bold]
-+"VM2_stop_0 lama3" -> "lamaVM2-G4_stop_0" [ style = bold]
- "VM2_stop_0 lama3" [ style=bold color="green" fontcolor="black"]
- "all_stopped" [ style=bold color="green" fontcolor="orange"]
- "lamaVM2-G4_running_0" [ style=bold color="green" fontcolor="orange"]
-diff --git a/pengine/test10/bug-rh-1097457.exp b/pengine/test10/bug-rh-1097457.exp
-index 36af9f3..175f413 100644
---- a/pengine/test10/bug-rh-1097457.exp
-+++ b/pengine/test10/bug-rh-1097457.exp
-@@ -119,7 +119,11 @@
-
-
-
--
-+
-+
-+
-+
-+
-
-
-
-@@ -331,7 +335,11 @@
-
-
-
--
-+
-+
-+
-+
-+
-
-
-
-diff --git a/pengine/test10/bug-rh-1097457.summary b/pengine/test10/bug-rh-1097457.summary
-index e2f235d..c8751ae 100644
---- a/pengine/test10/bug-rh-1097457.summary
-+++ b/pengine/test10/bug-rh-1097457.summary
-@@ -39,17 +39,17 @@ Transition Summary:
- * Restart lamaVM2 (Started lama3)
-
- Executing cluster transition:
-- * Pseudo action: lamaVM2-G4_stop_0
-- * Pseudo action: FAKE6-clone_stop_0
- * Resource action: lamaVM2 stop on lama3
- * Resource action: VM2 stop on lama3
-+ * Pseudo action: lamaVM2-G4_stop_0
- * Pseudo action: FAKE4-IP_stop_0
-- * Pseudo action: FAKE6_stop_0
-- * Pseudo action: FAKE6-clone_stopped_0
-- * Pseudo action: FAKE6-clone_start_0
-+ * Pseudo action: FAKE6-clone_stop_0
- * Resource action: VM2 start on lama3
- * Resource action: VM2 monitor=10000 on lama3
- * Pseudo action: FAKE4_stop_0
-+ * Pseudo action: FAKE6_stop_0
-+ * Pseudo action: FAKE6-clone_stopped_0
-+ * Pseudo action: FAKE6-clone_start_0
- * Resource action: lamaVM2 start on lama3
- * Resource action: lamaVM2 monitor=30000 on lama3
- * Resource action: FSlun3 monitor=10000 on lamaVM2
-diff --git a/pengine/test10/whitebox-fail1.dot b/pengine/test10/whitebox-fail1.dot
-index b595015..0f0fe26 100644
---- a/pengine/test10/whitebox-fail1.dot
-+++ b/pengine/test10/whitebox-fail1.dot
-@@ -26,6 +26,7 @@ digraph "g" {
- "container1_start_0 18node2" -> "lxc1_start_0 18node2" [ style = bold]
- "container1_start_0 18node2" [ style=bold color="green" fontcolor="black"]
- "container1_stop_0 18node2" -> "B_stop_0 lxc1" [ style = bold]
-+"container1_stop_0 18node2" -> "M-clone_stop_0" [ style = bold]
- "container1_stop_0 18node2" -> "M_stop_0 lxc1" [ style = bold]
- "container1_stop_0 18node2" -> "all_stopped" [ style = bold]
- "container1_stop_0 18node2" -> "container1_start_0 18node2" [ style = bold]
-diff --git a/pengine/test10/whitebox-fail1.exp b/pengine/test10/whitebox-fail1.exp
-index 834b231..01bb142 100644
---- a/pengine/test10/whitebox-fail1.exp
-+++ b/pengine/test10/whitebox-fail1.exp
-@@ -96,7 +96,11 @@
-
-
-
--
-+
-+
-+
-+
-+
-
-
-
-diff --git a/pengine/test10/whitebox-fail1.summary b/pengine/test10/whitebox-fail1.summary
-index 5e5887b..1586407 100644
---- a/pengine/test10/whitebox-fail1.summary
-+++ b/pengine/test10/whitebox-fail1.summary
-@@ -20,17 +20,17 @@ Transition Summary:
- * Restart lxc1 (Started 18node2)
-
- Executing cluster transition:
-- * Pseudo action: M-clone_stop_0
- * Resource action: lxc1 stop on 18node2
- * Resource action: container1 stop on 18node2
-+ * Pseudo action: M-clone_stop_0
-+ * Pseudo action: B_stop_0
-+ * Resource action: container1 start on 18node2
- * Pseudo action: M_stop_0
- * Pseudo action: M-clone_stopped_0
- * Pseudo action: M-clone_start_0
-- * Pseudo action: B_stop_0
-- * Pseudo action: all_stopped
-- * Resource action: container1 start on 18node2
- * Resource action: lxc1 start on 18node2
- * Resource action: lxc1 monitor=30000 on 18node2
-+ * Pseudo action: all_stopped
- * Resource action: M start on lxc1
- * Pseudo action: M-clone_running_0
- * Resource action: B start on lxc1
-diff --git a/pengine/test10/whitebox-fail2.dot b/pengine/test10/whitebox-fail2.dot
-index b595015..0f0fe26 100644
---- a/pengine/test10/whitebox-fail2.dot
-+++ b/pengine/test10/whitebox-fail2.dot
-@@ -26,6 +26,7 @@ digraph "g" {
- "container1_start_0 18node2" -> "lxc1_start_0 18node2" [ style = bold]
- "container1_start_0 18node2" [ style=bold color="green" fontcolor="black"]
- "container1_stop_0 18node2" -> "B_stop_0 lxc1" [ style = bold]
-+"container1_stop_0 18node2" -> "M-clone_stop_0" [ style = bold]
- "container1_stop_0 18node2" -> "M_stop_0 lxc1" [ style = bold]
- "container1_stop_0 18node2" -> "all_stopped" [ style = bold]
- "container1_stop_0 18node2" -> "container1_start_0 18node2" [ style = bold]
-diff --git a/pengine/test10/whitebox-fail2.exp b/pengine/test10/whitebox-fail2.exp
-index 834b231..01bb142 100644
---- a/pengine/test10/whitebox-fail2.exp
-+++ b/pengine/test10/whitebox-fail2.exp
-@@ -96,7 +96,11 @@
-
-
-
--
-+
-+
-+
-+
-+
-
-
-
-diff --git a/pengine/test10/whitebox-fail2.summary b/pengine/test10/whitebox-fail2.summary
-index 338173d..ab40d99 100644
---- a/pengine/test10/whitebox-fail2.summary
-+++ b/pengine/test10/whitebox-fail2.summary
-@@ -20,17 +20,17 @@ Transition Summary:
- * Recover lxc1 (Started 18node2)
-
- Executing cluster transition:
-- * Pseudo action: M-clone_stop_0
- * Resource action: lxc1 stop on 18node2
- * Resource action: container1 stop on 18node2
-+ * Pseudo action: M-clone_stop_0
-+ * Pseudo action: B_stop_0
-+ * Resource action: container1 start on 18node2
- * Pseudo action: M_stop_0
- * Pseudo action: M-clone_stopped_0
- * Pseudo action: M-clone_start_0
-- * Pseudo action: B_stop_0
-- * Pseudo action: all_stopped
-- * Resource action: container1 start on 18node2
- * Resource action: lxc1 start on 18node2
- * Resource action: lxc1 monitor=30000 on 18node2
-+ * Pseudo action: all_stopped
- * Resource action: M start on lxc1
- * Pseudo action: M-clone_running_0
- * Resource action: B start on lxc1
diff --git a/SOURCES/0006-Fix-Date-Correctly-set-time-from-seconds-since-epoch.patch b/SOURCES/0006-Fix-Date-Correctly-set-time-from-seconds-since-epoch.patch
deleted file mode 100644
index ea40f7e..0000000
--- a/SOURCES/0006-Fix-Date-Correctly-set-time-from-seconds-since-epoch.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-From: Andrew Beekhof
-Date: Tue, 18 Aug 2015 11:06:13 +1000
-Subject: [PATCH] Fix: Date: Correctly set time from seconds-since-epoch
-
-(cherry picked from commit efa318114d0b2124cc82fe143403e6de502e0134)
----
- lib/common/iso8601.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/lib/common/iso8601.c b/lib/common/iso8601.c
-index 769e01b..5f4a73d 100644
---- a/lib/common/iso8601.c
-+++ b/lib/common/iso8601.c
-@@ -1011,6 +1011,7 @@ ha_set_tm_time(crm_time_t * target, struct tm *source)
- target->days = 1 + source->tm_yday;
- }
-
-+ target->seconds = 0;
- if (source->tm_hour >= 0) {
- target->seconds += 60 * 60 * source->tm_hour;
- }
diff --git a/SOURCES/0007-Test-PE-Bug-cl-5247-Imply-resources-running-on-a-con.patch b/SOURCES/0007-Test-PE-Bug-cl-5247-Imply-resources-running-on-a-con.patch
deleted file mode 100644
index 74aa4b1..0000000
--- a/SOURCES/0007-Test-PE-Bug-cl-5247-Imply-resources-running-on-a-con.patch
+++ /dev/null
@@ -1,1419 +0,0 @@
-From: Andrew Beekhof
-Date: Tue, 18 Aug 2015 10:31:06 +1000
-Subject: [PATCH] Test: PE: Bug cl#5247 - Imply resources running on a
- container are stopped when the container is stopped
-
-(cherry picked from commit 825e82a5098bde0412944c7d4f54c3d825ddff08)
----
- pengine/regression.sh | 29 +-
- pengine/test10/bug-cl-5247.dot | 136 +++++++
- pengine/test10/bug-cl-5247.exp | 704 +++++++++++++++++++++++++++++++++++++
- pengine/test10/bug-cl-5247.scores | 84 +++++
- pengine/test10/bug-cl-5247.summary | 96 +++++
- pengine/test10/bug-cl-5247.xml | 295 ++++++++++++++++
- 6 files changed, 1331 insertions(+), 13 deletions(-)
- create mode 100644 pengine/test10/bug-cl-5247.dot
- create mode 100644 pengine/test10/bug-cl-5247.exp
- create mode 100644 pengine/test10/bug-cl-5247.scores
- create mode 100644 pengine/test10/bug-cl-5247.summary
- create mode 100644 pengine/test10/bug-cl-5247.xml
-
-diff --git a/pengine/regression.sh b/pengine/regression.sh
-index 7f73f92..1517e3d 100755
---- a/pengine/regression.sh
-+++ b/pengine/regression.sh
-@@ -31,19 +31,6 @@ info Performing the following tests from $io_dir
- create_mode="false"
-
- echo ""
--do_test cloned_start_one "order first clone then clone... first clone_min=2"
--do_test cloned_start_two "order first clone then clone... first clone_min=2"
--do_test cloned_stop_one "order first clone then clone... first clone_min=2"
--do_test cloned_stop_two "order first clone then clone... first clone_min=2"
--do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true"
--do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true"
--do_test clone_min_interleave_stop_one "order first clone then clone... first clone_min=2 and then has interleave=true"
--do_test clone_min_interleave_stop_two "order first clone then clone... first clone_min=2 and then has interleave=true"
--do_test clone_min_start_one "order first clone then primitive... first clone_min=2"
--do_test clone_min_start_two "order first clone then primitive... first clone_min=2"
--do_test clone_min_stop_all "order first clone then primitive... first clone_min=2"
--do_test clone_min_stop_one "order first clone then primitive... first clone_min=2"
--do_test clone_min_stop_two "order first clone then primitive... first clone_min=2"
-
- do_test simple1 "Offline "
- do_test simple2 "Start "
-@@ -373,6 +360,21 @@ do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved order
- do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)"
-
- echo ""
-+do_test cloned_start_one "order first clone then clone... first clone_min=2"
-+do_test cloned_start_two "order first clone then clone... first clone_min=2"
-+do_test cloned_stop_one "order first clone then clone... first clone_min=2"
-+do_test cloned_stop_two "order first clone then clone... first clone_min=2"
-+do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true"
-+do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true"
-+do_test clone_min_interleave_stop_one "order first clone then clone... first clone_min=2 and then has interleave=true"
-+do_test clone_min_interleave_stop_two "order first clone then clone... first clone_min=2 and then has interleave=true"
-+do_test clone_min_start_one "order first clone then primitive... first clone_min=2"
-+do_test clone_min_start_two "order first clone then primitive... first clone_min=2"
-+do_test clone_min_stop_all "order first clone then primitive... first clone_min=2"
-+do_test clone_min_stop_one "order first clone then primitive... first clone_min=2"
-+do_test clone_min_stop_two "order first clone then primitive... first clone_min=2"
-+
-+echo ""
- do_test unfence-startup "Clean unfencing"
- do_test unfence-definition "Unfencing when the agent changes"
- do_test unfence-parameters "Unfencing when the agent parameters changes"
-@@ -785,6 +787,7 @@ do_test container-group-3 "Container in group - stop failed"
- do_test container-group-4 "Container in group - reached migration-threshold"
- do_test container-is-remote-node "Place resource within container when container is remote-node"
- do_test bug-rh-1097457 "Kill user defined container/contents ordering"
-+do_test bug-cl-5247 "Graph loop when recovering m/s resource in a container"
-
- echo ""
- do_test whitebox-fail1 "Fail whitebox container rsc."
-diff --git a/pengine/test10/bug-cl-5247.dot b/pengine/test10/bug-cl-5247.dot
-new file mode 100644
-index 0000000..ed728ac
---- /dev/null
-+++ b/pengine/test10/bug-cl-5247.dot
-@@ -0,0 +1,136 @@
-+digraph "g" {
-+"all_stopped" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith1_running_0" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith1_start_0" -> "grpStonith1_running_0" [ style = bold]
-+"grpStonith1_start_0" -> "prmStonith1-2_start_0 bl460g8n4" [ style = bold]
-+"grpStonith1_start_0" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith1_stop_0" -> "grpStonith1_stopped_0" [ style = bold]
-+"grpStonith1_stop_0" -> "prmStonith1-2_stop_0 bl460g8n4" [ style = bold]
-+"grpStonith1_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith1_stopped_0" -> "grpStonith1_start_0" [ style = bold]
-+"grpStonith1_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith2_running_0" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith2_start_0" -> "grpStonith2_running_0" [ style = bold]
-+"grpStonith2_start_0" -> "prmStonith2-2_start_0 bl460g8n3" [ style = bold]
-+"grpStonith2_start_0" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith2_stop_0" -> "grpStonith2_stopped_0" [ style = bold]
-+"grpStonith2_stop_0" -> "prmStonith2-2_stop_0 bl460g8n3" [ style = bold]
-+"grpStonith2_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"grpStonith2_stopped_0" -> "grpStonith2_start_0" [ style = bold]
-+"grpStonith2_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"master-group_running_0" [ style=bold color="green" fontcolor="orange"]
-+"master-group_start_0" -> "master-group_running_0" [ style = bold]
-+"master-group_start_0" -> "vip-master_start_0 pgsr01" [ style = bold]
-+"master-group_start_0" -> "vip-rep_start_0 pgsr01" [ style = bold]
-+"master-group_start_0" [ style=bold color="green" fontcolor="orange"]
-+"master-group_stop_0" -> "master-group_stopped_0" [ style = bold]
-+"master-group_stop_0" -> "vip-master_stop_0 pgsr02" [ style = bold]
-+"master-group_stop_0" -> "vip-rep_stop_0 pgsr02" [ style = bold]
-+"master-group_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"master-group_stopped_0" -> "master-group_start_0" [ style = bold]
-+"master-group_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_confirmed-post_notify_demoted_0" -> "master-group_stop_0" [ style = bold]
-+"msPostgresql_confirmed-post_notify_demoted_0" -> "msPostgresql_pre_notify_stop_0" [ style = bold]
-+"msPostgresql_confirmed-post_notify_demoted_0" -> "pgsql_monitor_9000 pgsr01" [ style = bold]
-+"msPostgresql_confirmed-post_notify_demoted_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_confirmed-post_notify_stopped_0" -> "all_stopped" [ style = bold]
-+"msPostgresql_confirmed-post_notify_stopped_0" -> "pgsql_monitor_9000 pgsr01" [ style = bold]
-+"msPostgresql_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_confirmed-pre_notify_demote_0" -> "msPostgresql_demote_0" [ style = bold]
-+"msPostgresql_confirmed-pre_notify_demote_0" -> "msPostgresql_post_notify_demoted_0" [ style = bold]
-+"msPostgresql_confirmed-pre_notify_demote_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_confirmed-pre_notify_stop_0" -> "msPostgresql_post_notify_stopped_0" [ style = bold]
-+"msPostgresql_confirmed-pre_notify_stop_0" -> "msPostgresql_stop_0" [ style = bold]
-+"msPostgresql_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_demote_0" -> "msPostgresql_demoted_0" [ style = bold]
-+"msPostgresql_demote_0" -> "pgsql_demote_0 pgsr02" [ style = bold]
-+"msPostgresql_demote_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_demoted_0" -> "msPostgresql_post_notify_demoted_0" [ style = bold]
-+"msPostgresql_demoted_0" -> "msPostgresql_stop_0" [ style = bold]
-+"msPostgresql_demoted_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_post_notify_demoted_0" -> "msPostgresql_confirmed-post_notify_demoted_0" [ style = bold]
-+"msPostgresql_post_notify_demoted_0" -> "pgsql_post_notify_demoted_0 pgsr01" [ style = bold]
-+"msPostgresql_post_notify_demoted_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_post_notify_stopped_0" -> "msPostgresql_confirmed-post_notify_stopped_0" [ style = bold]
-+"msPostgresql_post_notify_stopped_0" -> "pgsql_post_notify_stop_0 pgsr01" [ style = bold]
-+"msPostgresql_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_pre_notify_demote_0" -> "msPostgresql_confirmed-pre_notify_demote_0" [ style = bold]
-+"msPostgresql_pre_notify_demote_0" -> "pgsql_pre_notify_demote_0 pgsr01" [ style = bold]
-+"msPostgresql_pre_notify_demote_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_pre_notify_stop_0" -> "msPostgresql_confirmed-pre_notify_stop_0" [ style = bold]
-+"msPostgresql_pre_notify_stop_0" -> "pgsql_pre_notify_stop_0 pgsr01" [ style = bold]
-+"msPostgresql_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_stop_0" -> "msPostgresql_stopped_0" [ style = bold]
-+"msPostgresql_stop_0" -> "pgsql_stop_0 pgsr02" [ style = bold]
-+"msPostgresql_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"msPostgresql_stopped_0" -> "msPostgresql_post_notify_stopped_0" [ style = bold]
-+"msPostgresql_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"pgsql_confirmed-post_notify_stop_0" -> "all_stopped" [ style = bold]
-+"pgsql_confirmed-post_notify_stop_0" -> "pgsql_monitor_9000 pgsr01" [ style = bold]
-+"pgsql_confirmed-post_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"pgsql_demote_0 pgsr02" -> "msPostgresql_demoted_0" [ style = bold]
-+"pgsql_demote_0 pgsr02" -> "pgsql_stop_0 pgsr02" [ style = bold]
-+"pgsql_demote_0 pgsr02" [ style=bold color="green" fontcolor="orange"]
-+"pgsql_monitor_9000 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"pgsql_post_notify_demoted_0 pgsr01" -> "msPostgresql_confirmed-post_notify_demoted_0" [ style = bold]
-+"pgsql_post_notify_demoted_0 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"pgsql_post_notify_stop_0 pgsr01" -> "msPostgresql_confirmed-post_notify_stopped_0" [ style = bold]
-+"pgsql_post_notify_stop_0 pgsr01" -> "pgsql_confirmed-post_notify_stop_0" [ style = bold]
-+"pgsql_post_notify_stop_0 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"pgsql_post_notify_stop_0" -> "pgsql_confirmed-post_notify_stop_0" [ style = bold]
-+"pgsql_post_notify_stop_0" -> "pgsql_post_notify_stop_0 pgsr01" [ style = bold]
-+"pgsql_post_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"pgsql_pre_notify_demote_0 pgsr01" -> "msPostgresql_confirmed-pre_notify_demote_0" [ style = bold]
-+"pgsql_pre_notify_demote_0 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"pgsql_pre_notify_stop_0 pgsr01" -> "msPostgresql_confirmed-pre_notify_stop_0" [ style = bold]
-+"pgsql_pre_notify_stop_0 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"pgsql_stop_0 pgsr02" -> "all_stopped" [ style = bold]
-+"pgsql_stop_0 pgsr02" -> "msPostgresql_stopped_0" [ style = bold]
-+"pgsql_stop_0 pgsr02" [ style=bold color="green" fontcolor="orange"]
-+"pgsr02_stop_0 bl460g8n4" -> "all_stopped" [ style = bold]
-+"pgsr02_stop_0 bl460g8n4" -> "prmDB2_stop_0 bl460g8n4" [ style = bold]
-+"pgsr02_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="black"]
-+"prmDB2_stop_0 bl460g8n4" -> "all_stopped" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" -> "master-group_stop_0" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" -> "msPostgresql_stop_0" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" -> "pgsql_demote_0 pgsr02" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" -> "pgsql_post_notify_stop_0" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" -> "pgsql_stop_0 pgsr02" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" -> "vip-master_stop_0 pgsr02" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" -> "vip-rep_stop_0 pgsr02" [ style = bold]
-+"prmDB2_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="black"]
-+"prmStonith1-2_monitor_3600000 bl460g8n4" [ style=bold color="green" fontcolor="black"]
-+"prmStonith1-2_start_0 bl460g8n4" -> "grpStonith1_running_0" [ style = bold]
-+"prmStonith1-2_start_0 bl460g8n4" -> "prmStonith1-2_monitor_3600000 bl460g8n4" [ style = bold]
-+"prmStonith1-2_start_0 bl460g8n4" [ style=bold color="green" fontcolor="black"]
-+"prmStonith1-2_stop_0 bl460g8n4" -> "all_stopped" [ style = bold]
-+"prmStonith1-2_stop_0 bl460g8n4" -> "grpStonith1_stopped_0" [ style = bold]
-+"prmStonith1-2_stop_0 bl460g8n4" -> "prmStonith1-2_start_0 bl460g8n4" [ style = bold]
-+"prmStonith1-2_stop_0 bl460g8n4" [ style=bold color="green" fontcolor="orange"]
-+"prmStonith2-2_monitor_3600000 bl460g8n3" [ style=bold color="green" fontcolor="black"]
-+"prmStonith2-2_start_0 bl460g8n3" -> "grpStonith2_running_0" [ style = bold]
-+"prmStonith2-2_start_0 bl460g8n3" -> "prmStonith2-2_monitor_3600000 bl460g8n3" [ style = bold]
-+"prmStonith2-2_start_0 bl460g8n3" [ style=bold color="green" fontcolor="black"]
-+"prmStonith2-2_stop_0 bl460g8n3" -> "all_stopped" [ style = bold]
-+"prmStonith2-2_stop_0 bl460g8n3" -> "grpStonith2_stopped_0" [ style = bold]
-+"prmStonith2-2_stop_0 bl460g8n3" -> "prmStonith2-2_start_0 bl460g8n3" [ style = bold]
-+"prmStonith2-2_stop_0 bl460g8n3" [ style=bold color="green" fontcolor="black"]
-+"vip-master_monitor_10000 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"vip-master_start_0 pgsr01" -> "master-group_running_0" [ style = bold]
-+"vip-master_start_0 pgsr01" -> "vip-master_monitor_10000 pgsr01" [ style = bold]
-+"vip-master_start_0 pgsr01" -> "vip-rep_start_0 pgsr01" [ style = bold]
-+"vip-master_start_0 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"vip-master_stop_0 pgsr02" -> "all_stopped" [ style = bold]
-+"vip-master_stop_0 pgsr02" -> "master-group_stopped_0" [ style = bold]
-+"vip-master_stop_0 pgsr02" -> "vip-master_start_0 pgsr01" [ style = bold]
-+"vip-master_stop_0 pgsr02" [ style=bold color="green" fontcolor="orange"]
-+"vip-rep_monitor_10000 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"vip-rep_start_0 pgsr01" -> "master-group_running_0" [ style = bold]
-+"vip-rep_start_0 pgsr01" -> "vip-rep_monitor_10000 pgsr01" [ style = bold]
-+"vip-rep_start_0 pgsr01" [ style=bold color="green" fontcolor="black"]
-+"vip-rep_stop_0 pgsr02" -> "all_stopped" [ style = bold]
-+"vip-rep_stop_0 pgsr02" -> "master-group_stopped_0" [ style = bold]
-+"vip-rep_stop_0 pgsr02" -> "vip-master_stop_0 pgsr02" [ style = bold]
-+"vip-rep_stop_0 pgsr02" -> "vip-rep_start_0 pgsr01" [ style = bold]
-+"vip-rep_stop_0 pgsr02" [ style=bold color="green" fontcolor="orange"]
-+}
-diff --git a/pengine/test10/bug-cl-5247.exp b/pengine/test10/bug-cl-5247.exp
-new file mode 100644
-index 0000000..5e36e84
---- /dev/null
-+++ b/pengine/test10/bug-cl-5247.exp
-@@ -0,0 +1,704 @@
-+ [704 XML lines not preserved]
-diff --git a/pengine/test10/bug-cl-5247.scores b/pengine/test10/bug-cl-5247.scores
-new file mode 100644
-index 0000000..e9e4709
---- /dev/null
-+++ b/pengine/test10/bug-cl-5247.scores
-@@ -0,0 +1,84 @@
-+Allocation scores:
-+Using the original execution date of: 2015-08-12 02:53:40Z
-+clone_color: msPostgresql allocation score on bl460g8n3: -INFINITY
-+clone_color: msPostgresql allocation score on bl460g8n4: -INFINITY
-+clone_color: msPostgresql allocation score on pgsr01: 0
-+clone_color: msPostgresql allocation score on pgsr02: 0
-+clone_color: pgsql:0 allocation score on bl460g8n3: -INFINITY
-+clone_color: pgsql:0 allocation score on bl460g8n4: -INFINITY
-+clone_color: pgsql:0 allocation score on pgsr01: 0
-+clone_color: pgsql:0 allocation score on pgsr02: INFINITY
-+clone_color: pgsql:1 allocation score on bl460g8n3: -INFINITY
-+clone_color: pgsql:1 allocation score on bl460g8n4: -INFINITY
-+clone_color: pgsql:1 allocation score on pgsr01: INFINITY
-+clone_color: pgsql:1 allocation score on pgsr02: 0
-+group_color: grpStonith1 allocation score on bl460g8n3: -INFINITY
-+group_color: grpStonith1 allocation score on bl460g8n4: 0
-+group_color: grpStonith1 allocation score on pgsr01: -INFINITY
-+group_color: grpStonith1 allocation score on pgsr02: -INFINITY
-+group_color: grpStonith2 allocation score on bl460g8n3: 0
-+group_color: grpStonith2 allocation score on bl460g8n4: -INFINITY
-+group_color: grpStonith2 allocation score on pgsr01: -INFINITY
-+group_color: grpStonith2 allocation score on pgsr02: -INFINITY
-+group_color: master-group allocation score on bl460g8n3: 0
-+group_color: master-group allocation score on bl460g8n4: 0
-+group_color: master-group allocation score on pgsr01: 0
-+group_color: master-group allocation score on pgsr02: 0
-+group_color: prmStonith1-2 allocation score on bl460g8n3: -INFINITY
-+group_color: prmStonith1-2 allocation score on bl460g8n4: INFINITY
-+group_color: prmStonith1-2 allocation score on pgsr01: -INFINITY
-+group_color: prmStonith1-2 allocation score on pgsr02: -INFINITY
-+group_color: prmStonith2-2 allocation score on bl460g8n3: INFINITY
-+group_color: prmStonith2-2 allocation score on bl460g8n4: -INFINITY
-+group_color: prmStonith2-2 allocation score on pgsr01: -INFINITY
-+group_color: prmStonith2-2 allocation score on pgsr02: -INFINITY
-+group_color: vip-master allocation score on bl460g8n3: 0
-+group_color: vip-master allocation score on bl460g8n4: 0
-+group_color: vip-master allocation score on pgsr01: 0
-+group_color: vip-master allocation score on pgsr02: INFINITY
-+group_color: vip-rep allocation score on bl460g8n3: 0
-+group_color: vip-rep allocation score on bl460g8n4: 0
-+group_color: vip-rep allocation score on pgsr01: 0
-+group_color: vip-rep allocation score on pgsr02: INFINITY
-+native_color: pgsql:0 allocation score on bl460g8n3: -INFINITY
-+native_color: pgsql:0 allocation score on bl460g8n4: -INFINITY
-+native_color: pgsql:0 allocation score on pgsr01: -INFINITY
-+native_color: pgsql:0 allocation score on pgsr02: -INFINITY
-+native_color: pgsql:1 allocation score on bl460g8n3: -INFINITY
-+native_color: pgsql:1 allocation score on bl460g8n4: -INFINITY
-+native_color: pgsql:1 allocation score on pgsr01: INFINITY
-+native_color: pgsql:1 allocation score on pgsr02: -INFINITY
-+native_color: pgsr01 allocation score on bl460g8n3: INFINITY
-+native_color: pgsr01 allocation score on bl460g8n4: -INFINITY
-+native_color: pgsr01 allocation score on pgsr01: -INFINITY
-+native_color: pgsr01 allocation score on pgsr02: -INFINITY
-+native_color: pgsr02 allocation score on bl460g8n3: -INFINITY
-+native_color: pgsr02 allocation score on bl460g8n4: -INFINITY
-+native_color: pgsr02 allocation score on pgsr01: -INFINITY
-+native_color: pgsr02 allocation score on pgsr02: -INFINITY
-+native_color: prmDB1 allocation score on bl460g8n3: INFINITY
-+native_color: prmDB1 allocation score on bl460g8n4: -INFINITY
-+native_color: prmDB1 allocation score on pgsr01: -INFINITY
-+native_color: prmDB1 allocation score on pgsr02: -INFINITY
-+native_color: prmDB2 allocation score on bl460g8n3: -INFINITY
-+native_color: prmDB2 allocation score on bl460g8n4: -INFINITY
-+native_color: prmDB2 allocation score on pgsr01: -INFINITY
-+native_color: prmDB2 allocation score on pgsr02: -INFINITY
-+native_color: prmStonith1-2 allocation score on bl460g8n3: -INFINITY
-+native_color: prmStonith1-2 allocation score on bl460g8n4: INFINITY
-+native_color: prmStonith1-2 allocation score on pgsr01: -INFINITY
-+native_color: prmStonith1-2 allocation score on pgsr02: -INFINITY
-+native_color: prmStonith2-2 allocation score on bl460g8n3: INFINITY
-+native_color: prmStonith2-2 allocation score on bl460g8n4: -INFINITY
-+native_color: prmStonith2-2 allocation score on pgsr01: -INFINITY
-+native_color: prmStonith2-2 allocation score on pgsr02: -INFINITY
-+native_color: vip-master allocation score on bl460g8n3: -INFINITY
-+native_color: vip-master allocation score on bl460g8n4: -INFINITY
-+native_color: vip-master allocation score on pgsr01: INFINITY
-+native_color: vip-master allocation score on pgsr02: -INFINITY
-+native_color: vip-rep allocation score on bl460g8n3: -INFINITY
-+native_color: vip-rep allocation score on bl460g8n4: -INFINITY
-+native_color: vip-rep allocation score on pgsr01: 0
-+native_color: vip-rep allocation score on pgsr02: -INFINITY
-+pgsql:0 promotion score on none: 0
-+pgsql:1 promotion score on pgsr01: 10
-diff --git a/pengine/test10/bug-cl-5247.summary b/pengine/test10/bug-cl-5247.summary
-new file mode 100644
-index 0000000..5564286
---- /dev/null
-+++ b/pengine/test10/bug-cl-5247.summary
-@@ -0,0 +1,96 @@
-+Using the original execution date of: 2015-08-12 02:53:40Z
-+
-+Current cluster status:
-+Online: [ bl460g8n3 bl460g8n4 ]
-+Containers: [ pgsr01:prmDB1 ]
-+
-+ prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3
-+ prmDB2 (ocf::heartbeat:VirtualDomain): FAILED bl460g8n4
-+ Resource Group: grpStonith1
-+ prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
-+ Resource Group: grpStonith2
-+ prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
-+ Resource Group: master-group
-+ vip-master (ocf::heartbeat:Dummy): FAILED pgsr02
-+ vip-rep (ocf::heartbeat:Dummy): FAILED pgsr02
-+ Master/Slave Set: msPostgresql [pgsql]
-+ Masters: [ pgsr01 ]
-+ Stopped: [ bl460g8n3 bl460g8n4 ]
-+
-+Transition Summary:
-+ * Stop prmDB2 (bl460g8n4)
-+ * Restart prmStonith1-2 (Started bl460g8n4)
-+ * Restart prmStonith2-2 (Started bl460g8n3)
-+ * Recover vip-master (Started pgsr02 -> pgsr01)
-+ * Recover vip-rep (Started pgsr02 -> pgsr01)
-+ * Demote pgsql:0 (Master -> Stopped pgsr02)
-+ * Stop pgsr02 (bl460g8n4)
-+
-+Executing cluster transition:
-+ * Pseudo action: grpStonith1_stop_0
-+ * Pseudo action: prmStonith1-2_stop_0
-+ * Pseudo action: grpStonith2_stop_0
-+ * Resource action: prmStonith2-2 stop on bl460g8n3
-+ * Pseudo action: msPostgresql_pre_notify_demote_0
-+ * Resource action: pgsr02 stop on bl460g8n4
-+ * Resource action: prmDB2 stop on bl460g8n4
-+ * Pseudo action: grpStonith1_stopped_0
-+ * Pseudo action: grpStonith1_start_0
-+ * Resource action: prmStonith1-2 start on bl460g8n4
-+ * Resource action: prmStonith1-2 monitor=3600000 on bl460g8n4
-+ * Pseudo action: grpStonith2_stopped_0
-+ * Pseudo action: grpStonith2_start_0
-+ * Resource action: prmStonith2-2 start on bl460g8n3
-+ * Resource action: prmStonith2-2 monitor=3600000 on bl460g8n3
-+ * Pseudo action: pgsql_post_notify_stop_0
-+ * Resource action: pgsql notify on pgsr01
-+ * Pseudo action: msPostgresql_confirmed-pre_notify_demote_0
-+ * Pseudo action: msPostgresql_demote_0
-+ * Pseudo action: grpStonith1_running_0
-+ * Pseudo action: grpStonith2_running_0
-+ * Pseudo action: pgsql_demote_0
-+ * Pseudo action: msPostgresql_demoted_0
-+ * Pseudo action: msPostgresql_post_notify_demoted_0
-+ * Resource action: pgsql notify on pgsr01
-+ * Pseudo action: msPostgresql_confirmed-post_notify_demoted_0
-+ * Pseudo action: msPostgresql_pre_notify_stop_0
-+ * Pseudo action: master-group_stop_0
-+ * Pseudo action: vip-rep_stop_0
-+ * Resource action: pgsql notify on pgsr01
-+ * Pseudo action: msPostgresql_confirmed-pre_notify_stop_0
-+ * Pseudo action: msPostgresql_stop_0
-+ * Pseudo action: vip-master_stop_0
-+ * Pseudo action: pgsql_stop_0
-+ * Pseudo action: msPostgresql_stopped_0
-+ * Pseudo action: master-group_stopped_0
-+ * Pseudo action: master-group_start_0
-+ * Resource action: vip-master start on pgsr01
-+ * Resource action: vip-rep start on pgsr01
-+ * Pseudo action: msPostgresql_post_notify_stopped_0
-+ * Pseudo action: master-group_running_0
-+ * Resource action: vip-master monitor=10000 on pgsr01
-+ * Resource action: vip-rep monitor=10000 on pgsr01
-+ * Resource action: pgsql notify on pgsr01
-+ * Pseudo action: msPostgresql_confirmed-post_notify_stopped_0
-+ * Pseudo action: pgsql_notified_0
-+ * Resource action: pgsql monitor=9000 on pgsr01
-+ * Pseudo action: all_stopped
-+Using the original execution date of: 2015-08-12 02:53:40Z
-+
-+Revised cluster status:
-+Online: [ bl460g8n3 bl460g8n4 ]
-+Containers: [ pgsr01:prmDB1 ]
-+
-+ prmDB1 (ocf::heartbeat:VirtualDomain): Started bl460g8n3
-+ prmDB2 (ocf::heartbeat:VirtualDomain): FAILED
-+ Resource Group: grpStonith1
-+ prmStonith1-2 (stonith:external/ipmi): Started bl460g8n4
-+ Resource Group: grpStonith2
-+ prmStonith2-2 (stonith:external/ipmi): Started bl460g8n3
-+ Resource Group: master-group
-+ vip-master (ocf::heartbeat:Dummy): FAILED[ pgsr02 pgsr01 ]
-+ vip-rep (ocf::heartbeat:Dummy): FAILED[ pgsr02 pgsr01 ]
-+ Master/Slave Set: msPostgresql [pgsql]
-+ Masters: [ pgsr01 ]
-+ Stopped: [ bl460g8n3 bl460g8n4 ]
-+
-diff --git a/pengine/test10/bug-cl-5247.xml b/pengine/test10/bug-cl-5247.xml
-new file mode 100644
-index 0000000..c36ef40
---- /dev/null
-+++ b/pengine/test10/bug-cl-5247.xml
-@@ -0,0 +1,295 @@
-+ [295 XML lines not preserved]
diff --git a/SOURCES/0008-Fix-tools-memory-leak-in-crm_resource.patch b/SOURCES/0008-Fix-tools-memory-leak-in-crm_resource.patch
deleted file mode 100644
index c29561f..0000000
--- a/SOURCES/0008-Fix-tools-memory-leak-in-crm_resource.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Ken Gaillot
-Date: Mon, 17 Aug 2015 10:28:19 -0500
-Subject: [PATCH] Fix: tools: memory leak in crm_resource
-
-(cherry picked from commit c11bc4b856b07d5ea5b8284a3d566dd782e6bb7c)
----
- tools/crm_resource_runtime.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
-index f260e19..b9427bc 100644
---- a/tools/crm_resource_runtime.c
-+++ b/tools/crm_resource_runtime.c
-@@ -399,9 +399,11 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch
- &local_attr_id);
-
- if (rc == -ENXIO) {
-+ free(lookup_id);
- return pcmk_ok;
-
- } else if (rc != pcmk_ok) {
-+ free(lookup_id);
- return rc;
- }
-
-@@ -424,6 +426,7 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch
- attr_name ? " name=" : "", attr_name ? attr_name : "");
- }
-
-+ free(lookup_id);
- free_xml(xml_obj);
- free(local_attr_id);
- return rc;
diff --git a/SOURCES/0009-Fix-pengine-The-failed-action-of-the-resource-that-o.patch b/SOURCES/0009-Fix-pengine-The-failed-action-of-the-resource-that-o.patch
deleted file mode 100644
index 1ddba9f..0000000
--- a/SOURCES/0009-Fix-pengine-The-failed-action-of-the-resource-that-o.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From: Hideo Yamauchi
-Date: Fri, 21 Aug 2015 14:12:33 +0900
-Subject: [PATCH] Fix: pengine: The failed action of the resource that occurred
- in shutdown is not displayed.
-
-It is like the problem that entered when you summarized an old judgment
-in function (record_failed_op) by the next correction.
-
-*
-https://github.com/ClusterLabs/pacemaker/commit/9cd666ac15a2998f4543e1dac33edea36bbcf930#diff-7dae505817fa61e544018e581ee45933
-
-(cherry picked from commit 119df5c0bd8fac02bd36e45a28288dcf4624b89d)
----
- lib/pengine/unpack.c | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
-diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
-index 0f83be4..156a192 100644
---- a/lib/pengine/unpack.c
-+++ b/lib/pengine/unpack.c
-@@ -2546,9 +2546,7 @@ record_failed_op(xmlNode *op, node_t* node, pe_working_set_t * data_set)
- xmlNode *xIter = NULL;
- const char *op_key = crm_element_value(op, XML_LRM_ATTR_TASK_KEY);
-
-- if (node->details->shutdown) {
-- return;
-- } else if(node->details->online == FALSE) {
-+ if ((node->details->shutdown) && (node->details->online == FALSE)) {
- return;
- }
-
diff --git a/SOURCES/001-makefile-cleanup.patch b/SOURCES/001-makefile-cleanup.patch
new file mode 100644
index 0000000..c2f5746
--- /dev/null
+++ b/SOURCES/001-makefile-cleanup.patch
@@ -0,0 +1,535 @@
+From d720014c2cf7ff00004b64ca9cf817ffc00e6ffb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?=
+Date: Fri, 10 Jun 2016 15:00:03 +0200
+Subject: [PATCH 1/5] Build: Makefile.common to define MAINTAINERCLEANFILES +
+ AM_CPPFLAGS
+
+Fallout after 89fba95.
+---
+ attrd/Makefile.am | 5 +----
+ lib/cib/Makefile.am | 5 +----
+ lib/cluster/Makefile.am | 5 +----
+ lib/common/Makefile.am | 8 +++-----
+ lib/fencing/Makefile.am | 6 +-----
+ lib/lrmd/Makefile.am | 8 +-------
+ lib/pengine/Makefile.am | 5 +----
+ lib/transition/Makefile.am | 5 +----
+ lrmd/Makefile.am | 1 -
+ 9 files changed, 10 insertions(+), 38 deletions(-)
+
+diff --git a/attrd/Makefile.am b/attrd/Makefile.am
+index 9d5e223..0c5d456 100644
+--- a/attrd/Makefile.am
++++ b/attrd/Makefile.am
+@@ -15,10 +15,7 @@
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ #
+-MAINTAINERCLEANFILES = Makefile.in
+-
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
++include $(top_srcdir)/Makefile.common
+
+ halibdir = $(CRM_DAEMON_DIR)
+ halib_PROGRAMS = attrd
+diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am
+index 1ebd7b0..687b9f3 100644
+--- a/lib/cib/Makefile.am
++++ b/lib/cib/Makefile.am
+@@ -15,10 +15,7 @@
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ #
+-MAINTAINERCLEANFILES = Makefile.in
+-
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
++include $(top_srcdir)/Makefile.common
+
+ ## libraries
+ lib_LTLIBRARIES = libcib.la
+diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am
+index 5b6f3f7..f5f40bc 100644
+--- a/lib/cluster/Makefile.am
++++ b/lib/cluster/Makefile.am
+@@ -15,10 +15,7 @@
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ #
+-MAINTAINERCLEANFILES = Makefile.in
+-
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
++include $(top_srcdir)/Makefile.common
+
+ headerdir=$(pkgincludedir)/crm/cluster
+
+diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am
+index 8ef08db..7ae1dc8 100644
+--- a/lib/common/Makefile.am
++++ b/lib/common/Makefile.am
+@@ -15,12 +15,10 @@
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ #
+-MAINTAINERCLEANFILES = Makefile.in
++include $(top_srcdir)/Makefile.common
+
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl \
+- -I$(top_builddir)/lib/gnu -I$(top_srcdir)/lib/gnu \
+- -DSBINDIR=\"$(sbindir)\"
++AM_CPPFLAGS += -I$(top_builddir)/lib/gnu -I$(top_srcdir)/lib/gnu \
++ -DSBINDIR=\"$(sbindir)\"
+
+ ## libraries
+ lib_LTLIBRARIES = libcrmcommon.la
+diff --git a/lib/fencing/Makefile.am b/lib/fencing/Makefile.am
+index 3320466..aa57ba5 100644
+--- a/lib/fencing/Makefile.am
++++ b/lib/fencing/Makefile.am
+@@ -16,11 +16,7 @@
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ #
+-MAINTAINERCLEANFILES = Makefile.in
+-
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl \
+- -I$(top_builddir) -I$(top_srcdir)
++include $(top_srcdir)/Makefile.common
+
+ lib_LTLIBRARIES = libstonithd.la
+
+diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am
+index d5ae2f4..8c76a78 100644
+--- a/lib/lrmd/Makefile.am
++++ b/lib/lrmd/Makefile.am
+@@ -14,13 +14,7 @@
+ # License along with this library; if not, write to the Free Software
+ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ #
+-#
+-
+-MAINTAINERCLEANFILES = Makefile.in
+-
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl \
+- -I$(top_builddir) -I$(top_srcdir)
++include $(top_srcdir)/Makefile.common
+
+ lib_LTLIBRARIES = liblrmd.la
+
+diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am
+index b9191d5..4544377 100644
+--- a/lib/pengine/Makefile.am
++++ b/lib/pengine/Makefile.am
+@@ -15,10 +15,7 @@
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ #
+-MAINTAINERCLEANFILES = Makefile.in
+-
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
++include $(top_srcdir)/Makefile.common
+
+ ## libraries
+ lib_LTLIBRARIES = libpe_rules.la libpe_status.la
+diff --git a/lib/transition/Makefile.am b/lib/transition/Makefile.am
+index 4447f1b..6cc9bca 100644
+--- a/lib/transition/Makefile.am
++++ b/lib/transition/Makefile.am
+@@ -15,10 +15,7 @@
+ # along with this program; if not, write to the Free Software
+ # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ #
+-MAINTAINERCLEANFILES = Makefile.in
+-
+-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
+- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
++include $(top_srcdir)/Makefile.common
+
+ ## libraries
+ lib_LTLIBRARIES = libtransitioner.la
+diff --git a/lrmd/Makefile.am b/lrmd/Makefile.am
+index a8fb07a..556d48a 100644
+--- a/lrmd/Makefile.am
++++ b/lrmd/Makefile.am
+@@ -14,7 +14,6 @@
+ # License along with this library; if not, write to the Free Software
+ # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ #
+-
+ include $(top_srcdir)/Makefile.common
+
+ testdir = $(datadir)/$(PACKAGE)/tests/lrmd
+--
+1.8.3.1
+
+
+From bfad92c9e23f3bf7a73e86017eed46ba7fd63c12 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jan=20Pokorn=C3=BD?=
+Date: Fri, 10 Jun 2016 17:38:46 +0200
+Subject: [PATCH 2/5] Build: Makefile.am files: drop extraneous variables +
+ targets
+
+- empty variables
+- non-fitting .PHONY definitions
+- unemployed install-exec-local + uninstall-local target pairs (per file)
+- AM_CFLAGS unnecessarily initialized with current AM_CPPFLAGS value
+- empty header_HEADERS variable + superfluous headerdir definition
+---
+ Makefile.am | 2 --
+ attrd/Makefile.am | 8 --------
+ fencing/Makefile.am | 2 --
+ include/crm/Makefile.am | 1 -
+ include/crm/fencing/Makefile.am | 3 ---
+ lib/cib/Makefile.am | 5 -----
+ lib/cluster/Makefile.am | 8 --------
+ lib/common/Makefile.am | 4 ----
+ lib/fencing/Makefile.am | 2 --
+ lib/lrmd/Makefile.am | 3 ---
+ lib/pengine/Makefile.am | 4 ----
+ lib/services/Makefile.am | 2 --
+ lib/transition/Makefile.am | 5 -----
+ tools/Makefile.am | 11 -----------
+ 14 files changed, 60 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 0edf0c9..5d1a689 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -73,5 +73,3 @@ dist-clean-local:
+
+ maintainer-clean-local:
+ rm -f libltdl.tar
+-
+-.PHONY: rpm pkg handy handy-copy
+diff --git a/attrd/Makefile.am b/attrd/Makefile.am
+index 0c5d456..9a841e5 100644
+--- a/attrd/Makefile.am
++++ b/attrd/Makefile.am
+@@ -21,8 +21,6 @@ halibdir = $(CRM_DAEMON_DIR)
+ halib_PROGRAMS = attrd
+ ## SOURCES
+
+-noinst_HEADERS =
+-
+ attrd_SOURCES =
+ attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \
+ $(top_builddir)/lib/common/libcrmcommon.la \
+@@ -37,9 +35,3 @@ endif
+
+ clean-generic:
+ rm -f *.log *.debug *.xml *~
+-
+-install-exec-local:
+-
+-uninstall-local:
+-
+-.PHONY: install-exec-hook
+diff --git a/fencing/Makefile.am b/fencing/Makefile.am
+index 383c217..bef6e6e 100644
+--- a/fencing/Makefile.am
++++ b/fencing/Makefile.am
+@@ -17,8 +17,6 @@
+ #
+ include $(top_srcdir)/Makefile.common
+
+-SUBDIRS =
+-
+ ## binary progs
+ testdir = $(datadir)/$(PACKAGE)/tests/fencing
+ test_SCRIPTS = regression.py
+diff --git a/include/crm/Makefile.am b/include/crm/Makefile.am
+index ef96011..bdb627c 100644
+--- a/include/crm/Makefile.am
++++ b/include/crm/Makefile.am
+@@ -19,7 +19,6 @@ MAINTAINERCLEANFILES = Makefile.in
+
+ headerdir=$(pkgincludedir)/crm
+
+-noinst_HEADERS =
+ header_HEADERS = crm.h cib.h attrd.h msg_xml.h transition.h stonith-ng.h cluster.h lrmd.h services.h error.h compatibility.h
+
+ SUBDIRS = common pengine cib fencing cluster
+diff --git a/include/crm/fencing/Makefile.am b/include/crm/fencing/Makefile.am
+index f85c842..57a787b 100644
+--- a/include/crm/fencing/Makefile.am
++++ b/include/crm/fencing/Makefile.am
+@@ -17,7 +17,4 @@
+ #
+ MAINTAINERCLEANFILES = Makefile.in
+
+-headerdir=$(pkgincludedir)/crm/fencing
+-
+ noinst_HEADERS = internal.h
+-header_HEADERS =
+diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am
+index 687b9f3..07bf261 100644
+--- a/lib/cib/Makefile.am
++++ b/lib/cib/Makefile.am
+@@ -21,7 +21,6 @@ include $(top_srcdir)/Makefile.common
+ lib_LTLIBRARIES = libcib.la
+
+ ## SOURCES
+-noinst_HEADERS =
+ libcib_la_SOURCES = cib_ops.c cib_utils.c cib_client.c cib_native.c cib_attrs.c
+ libcib_la_SOURCES += cib_file.c cib_remote.c
+
+@@ -31,7 +30,3 @@ libcib_la_CFLAGS = -I$(top_srcdir)
+
+ clean-generic:
+ rm -f *.log *.debug *.xml *~
+-
+-install-exec-local:
+-
+-uninstall-local:
+diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am
+index f5f40bc..dcc1fbd 100644
+--- a/lib/cluster/Makefile.am
++++ b/lib/cluster/Makefile.am
+@@ -17,10 +17,6 @@
+ #
+ include $(top_srcdir)/Makefile.common
+
+-headerdir=$(pkgincludedir)/crm/cluster
+-
+-header_HEADERS =
+-
+ ## libraries
+ lib_LTLIBRARIES = libcrmcluster.la
+
+@@ -45,7 +41,3 @@ endif
+
+ clean-generic:
+ rm -f *.log *.debug *.xml *~
+-
+-install-exec-local:
+-
+-uninstall-local:
+diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am
+index 7ae1dc8..111628f 100644
+--- a/lib/common/Makefile.am
++++ b/lib/common/Makefile.am
+@@ -44,7 +44,3 @@ libcrmcommon_la_SOURCES += $(top_builddir)/lib/gnu/md5.c
+
+ clean-generic:
+ rm -f *.log *.debug *.xml *~
+-
+-install-exec-local:
+-
+-uninstall-local:
+diff --git a/lib/fencing/Makefile.am b/lib/fencing/Makefile.am
+index aa57ba5..a9f9874 100644
+--- a/lib/fencing/Makefile.am
++++ b/lib/fencing/Makefile.am
+@@ -23,5 +23,3 @@ lib_LTLIBRARIES = libstonithd.la
+ libstonithd_la_SOURCES = st_client.c
+ libstonithd_la_LDFLAGS = -version-info 4:1:2
+ libstonithd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
+-
+-AM_CFLAGS = $(AM_CPPFLAGS)
+diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am
+index 8c76a78..c23fef5 100644
+--- a/lib/lrmd/Makefile.am
++++ b/lib/lrmd/Makefile.am
+@@ -23,6 +23,3 @@ liblrmd_la_LDFLAGS = -version-info 4:0:3
+ liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \
+ $(top_builddir)/lib/services/libcrmservice.la \
+ $(top_builddir)/lib/fencing/libstonithd.la
+-
+-
+-AM_CFLAGS = $(AM_CPPFLAGS)
+diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am
+index 4544377..a0d19e5 100644
+--- a/lib/pengine/Makefile.am
++++ b/lib/pengine/Makefile.am
+@@ -34,7 +34,3 @@ libpe_status_la_LIBADD = @CURSESLIBS@ $(top_builddir)/lib/common/libcrmcommon.l
+
+ clean-generic:
+ rm -f *.log *.debug *~
+-
+-install-exec-local:
+-
+-uninstall-local:
+diff --git a/lib/services/Makefile.am b/lib/services/Makefile.am
+index a9fe26d..2797b53 100644
+--- a/lib/services/Makefile.am
++++ b/lib/services/Makefile.am
+@@ -39,5 +39,3 @@ endif
+ if BUILD_SYSTEMD
+ libcrmservice_la_SOURCES += systemd.c
+ endif
+-
+-AM_CFLAGS = $(AM_CPPFLAGS)
+diff --git a/lib/transition/Makefile.am b/lib/transition/Makefile.am
+index 6cc9bca..7bcfc1a 100644
+--- a/lib/transition/Makefile.am
++++ b/lib/transition/Makefile.am
+@@ -21,7 +21,6 @@ include $(top_srcdir)/Makefile.common
+ lib_LTLIBRARIES = libtransitioner.la
+
+ ## SOURCES
+-noinst_HEADERS =
+ libtransitioner_la_SOURCES = unpack.c graph.c utils.c
+
+ libtransitioner_la_LDFLAGS = -version-info 2:5:0
+@@ -30,7 +29,3 @@ libtransitioner_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
+
+ clean-generic:
+ rm -f *~
+-
+-install-exec-local:
+-
+-uninstall-local:
+diff --git a/tools/Makefile.am b/tools/Makefile.am
+index 555b1db..59ce743 100644
+--- a/tools/Makefile.am
++++ b/tools/Makefile.am
+@@ -26,9 +26,6 @@ COMMONLIBS = \
+ $(top_builddir)/lib/cib/libcib.la \
+ $(CURSESLIBS)
$(CLUSTERLIBS) + +-headerdir = $(pkgincludedir)/crm +-header_HEADERS = +- + pcmkdir = $(datadir)/$(PACKAGE) + pcmk_DATA = report.common report.collector + +@@ -57,8 +54,6 @@ endif + + ## SOURCES + +-noinst_HEADERS = +- + MAN8DEPS = crm_attribute crm_node + + crmadmin_SOURCES = crmadmin.c +@@ -143,10 +138,4 @@ ipmiservicelogd_CFLAGS = $(OPENIPMI_SERVICELOG_CFLAGS) $(SERVICELOG_CFLAGS) + ipmiservicelogd_LDFLAGS = $(top_builddir)/lib/common/libcrmcommon.la $(OPENIPMI_SERVICELOG_LIBS) $(SERVICELOG_LIBS) + endif + +-install-exec-local: +- +-uninstall-local: +- +-.PHONY: install-exec-hook +- + CLEANFILES = $(man8_MANS) +-- +1.8.3.1 + + +From 048efbf21a84568816b35522ed5f7b84f9ffdc41 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Mon, 20 Jun 2016 15:10:34 +0200 +Subject: [PATCH 3/5] Build: drop superfluous libcrmcluster_la_DEPENDENCIES var + +--- + lib/cluster/Makefile.am | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am +index dcc1fbd..ffa2a73 100644 +--- a/lib/cluster/Makefile.am ++++ b/lib/cluster/Makefile.am +@@ -23,7 +23,6 @@ lib_LTLIBRARIES = libcrmcluster.la + libcrmcluster_la_SOURCES = election.c cluster.c membership.c + libcrmcluster_la_LDFLAGS = -version-info 6:0:2 + libcrmcluster_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la $(CLUSTERLIBS) +-libcrmcluster_la_DEPENDENCIES = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la + + if BUILD_CS_SUPPORT + libcrmcluster_la_SOURCES += cpg.c +-- +1.8.3.1 + + +From f5042bf76d048cb1fb77f9a60253cbf5a0ee9fc1 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Mon, 20 Jun 2016 21:09:00 +0200 +Subject: [PATCH 4/5] Build: cib/Makefile.am: avoid second-guessing Libtool + +Explicitly referring to location that is an implementation detail of +Libtool is not very reasonable and appears to be working regardless. 
+--- + lib/cib/Makefile.am | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am +index 07bf261..0c57eee 100644 +--- a/lib/cib/Makefile.am ++++ b/lib/cib/Makefile.am +@@ -24,7 +24,7 @@ lib_LTLIBRARIES = libcib.la + libcib_la_SOURCES = cib_ops.c cib_utils.c cib_client.c cib_native.c cib_attrs.c + libcib_la_SOURCES += cib_file.c cib_remote.c + +-libcib_la_LDFLAGS = -version-info 5:1:1 -L$(top_builddir)/lib/pengine/.libs ++libcib_la_LDFLAGS = -version-info 5:1:1 + libcib_la_LIBADD = $(CRYPTOLIB) $(top_builddir)/lib/pengine/libpe_rules.la $(top_builddir)/lib/common/libcrmcommon.la + libcib_la_CFLAGS = -I$(top_srcdir) + +-- +1.8.3.1 + + +From 8a86b03d1dbb8bab4a38f39544c519a2e7e85136 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 10 Jun 2016 17:55:57 +0200 +Subject: [PATCH 5/5] Build: Makefile.am f.: move/drop comments to reflect + previous commits + +* "# lex/yacc issues": +- introduced with 800b0c3 ("CFLAGS = $(CFLAGS_COPY:-Werror=)") +- comment + the statement got separated as of ae6fb13 + +* "# utils.h pengine.h" +- introduced with a4d9634 +- became superfluous as of ef1f2e2 +--- + fencing/Makefile.am | 4 +--- + pengine/Makefile.am | 2 +- + 2 files changed, 2 insertions(+), 4 deletions(-) + +diff --git a/fencing/Makefile.am b/fencing/Makefile.am +index bef6e6e..79fe2ed 100644 +--- a/fencing/Makefile.am ++++ b/fencing/Makefile.am +@@ -56,9 +56,6 @@ BUILT_SOURCES = standalone_config.h + + stonithd_SOURCES += standalone_config.c config.y config.l + stonithd_AM_LFLAGS = -o$(LEX_OUTPUT_ROOT).c +- +-# lex/yacc issues: +- + endif + stonithd_YFLAGS = -d + +@@ -69,6 +66,7 @@ stonithd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/pengine/libpengine.la \ + $(CRYPTOLIB) $(CLUSTERLIBS) + ++# lex/yacc issues: + CFLAGS = $(CFLAGS_COPY:-Werror=) + + CLEANFILES = $(man7_MANS) $(man8_MANS) +diff --git a/pengine/Makefile.am b/pengine/Makefile.am +index 172a86b..170b728 100644 +--- a/pengine/Makefile.am ++++ b/pengine/Makefile.am +@@ -58,8 +58,8 @@ man7_MANS = pengine.7 + endif + + ## SOURCES ++ + noinst_HEADERS = allocate.h utils.h pengine.h +-#utils.h pengine.h + + libpengine_la_LDFLAGS = -version-info 11:0:1 + # -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version +-- +1.8.3.1 + diff --git a/SOURCES/0010-Log-services-Reduce-severity-of-noisy-log-messages.patch b/SOURCES/0010-Log-services-Reduce-severity-of-noisy-log-messages.patch deleted file mode 100644 index 40aeb8b..0000000 --- a/SOURCES/0010-Log-services-Reduce-severity-of-noisy-log-messages.patch +++ /dev/null @@ -1,34 +0,0 @@ -From: "Gao,Yan" -Date: Wed, 26 Aug 2015 18:12:56 +0200 -Subject: [PATCH] Log: services: Reduce severity of noisy log messages - -They occurred for every monitor operation of systemd resources. 
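[Editor's note — a minimal sketch of the severity split this patch applies, assuming nothing beyond what the hunk below shows: routine per-monitor DBus bookkeeping is demoted from info to trace, and only the genuinely surprising case (an outstanding call being replaced) stays loud. The log_info/log_trace macros are self-contained stand-ins for Pacemaker's crm_info()/crm_trace(), not the real headers.]

/* Stand-in logging macros so the example compiles on its own. */
#include <stdio.h>

#define log_info(fmt, ...)  fprintf(stderr, "info:  " fmt "\n", ##__VA_ARGS__)
#define log_trace(fmt, ...) /* compiled out unless tracing is enabled */

struct op { const char *id; void *pending; };

static void set_op_pending(struct op *op, void *pending)
{
    if (op->pending != NULL) {
        if (pending != NULL) {
            /* Replacing an outstanding call is unexpected: keep it at info. */
            log_info("Lost pending %s call (%p)", op->id, op->pending);
        } else {
            /* Normal completion happens on every monitor: keep it quiet. */
            log_trace("Done with pending %s call (%p)", op->id, op->pending);
        }
    }
    op->pending = pending;
}

int main(void)
{
    struct op op = { "rsc1_monitor_10000", NULL };
    int first = 1, second = 2;

    set_op_pending(&op, &first);  /* quiet: nothing was outstanding */
    set_op_pending(&op, &second); /* loud: the previous call was lost */
    set_op_pending(&op, NULL);    /* quiet: normal completion */
    return 0;
}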
- -(cherry picked from commit a77c401a3fcdedec165c05d27a75d75abcebf4a1) ---- - lib/services/services.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/lib/services/services.c b/lib/services/services.c -index 3f40078..abf1458 100644 ---- a/lib/services/services.c -+++ b/lib/services/services.c -@@ -366,15 +366,15 @@ services_set_op_pending(svc_action_t *op, DBusPendingCall *pending) - if (pending) { - crm_info("Lost pending %s DBus call (%p)", op->id, op->opaque->pending); - } else { -- crm_info("Done with pending %s DBus call (%p)", op->id, op->opaque->pending); -+ crm_trace("Done with pending %s DBus call (%p)", op->id, op->opaque->pending); - } - dbus_pending_call_unref(op->opaque->pending); - } - op->opaque->pending = pending; - if (pending) { -- crm_info("Updated pending %s DBus call (%p)", op->id, pending); -+ crm_trace("Updated pending %s DBus call (%p)", op->id, pending); - } else { -- crm_info("Cleared pending %s DBus call", op->id); -+ crm_trace("Cleared pending %s DBus call", op->id); - } - } - #endif diff --git a/SOURCES/0011-Fix-xml-Mark-xml-nodes-as-dirty-if-any-children-move.patch b/SOURCES/0011-Fix-xml-Mark-xml-nodes-as-dirty-if-any-children-move.patch deleted file mode 100644 index c67a465..0000000 --- a/SOURCES/0011-Fix-xml-Mark-xml-nodes-as-dirty-if-any-children-move.patch +++ /dev/null @@ -1,24 +0,0 @@ -From: "Gao,Yan" -Date: Wed, 26 Aug 2015 16:28:38 +0200 -Subject: [PATCH] Fix: xml: Mark xml nodes as dirty if any children move - -Otherwise if nothing else changed in the new xml, even the versions -weren't bumped, crm_diff would output an empty xml diff. - -(cherry picked from commit 1073786ec24f3bbf26a0f6a5b0614a65edac4301) ---- - lib/common/xml.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/lib/common/xml.c b/lib/common/xml.c -index 299c7bf..353eb4b 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -4275,6 +4275,7 @@ __xml_diff_object(xmlNode * old, xmlNode * new) - if(p_old != p_new) { - crm_info("%s.%s moved from %d to %d - %d", - new_child->name, ID(new_child), p_old, p_new); -+ __xml_node_dirty(new); - p->flags |= xpf_moved; - - if(p_old > p_new) { diff --git a/SOURCES/0012-Feature-crmd-Implement-reliable-event-notifications.patch b/SOURCES/0012-Feature-crmd-Implement-reliable-event-notifications.patch deleted file mode 100644 index 94e3307..0000000 --- a/SOURCES/0012-Feature-crmd-Implement-reliable-event-notifications.patch +++ /dev/null @@ -1,565 +0,0 @@ -From: Andrew Beekhof -Date: Tue, 1 Sep 2015 13:17:45 +1000 -Subject: [PATCH] Feature: crmd: Implement reliable event notifications - -(cherry picked from commit 0cd1b8f02b403976afe106e0ca3a8a8a16864c6c) ---- - crmd/Makefile.am | 2 +- - crmd/callbacks.c | 4 + - crmd/control.c | 67 +++++++++++++--- - crmd/crmd_utils.h | 1 + - crmd/lrm.c | 2 + - crmd/notify.c | 188 ++++++++++++++++++++++++++++++++++++++++++++ - crmd/notify.h | 30 +++++++ - crmd/te_utils.c | 2 + - cts/CIB.py | 2 + - extra/pcmk_notify_sample.sh | 68 ++++++++++++++++ - include/crm_internal.h | 1 + - lib/common/utils.c | 27 +++++++ - 12 files changed, 380 insertions(+), 14 deletions(-) - create mode 100644 crmd/notify.c - create mode 100644 crmd/notify.h - create mode 100755 extra/pcmk_notify_sample.sh - -diff --git a/crmd/Makefile.am b/crmd/Makefile.am -index 8e5e1df..984f5d0 100644 ---- a/crmd/Makefile.am -+++ b/crmd/Makefile.am -@@ -28,7 +28,7 @@ noinst_HEADERS = crmd.h crmd_fsa.h crmd_messages.h fsa_defines.h \ - fsa_matrix.h fsa_proto.h crmd_utils.h crmd_callbacks.h \ - crmd_lrm.h te_callbacks.h 
tengine.h - --crmd_SOURCES = main.c crmd.c corosync.c \ -+crmd_SOURCES = main.c crmd.c corosync.c notify.c \ - fsa.c control.c messages.c membership.c callbacks.c \ - election.c join_client.c join_dc.c subsystems.c throttle.c \ - cib.c pengine.c tengine.c lrm.c lrm_state.c remote_lrmd_ra.c \ -diff --git a/crmd/callbacks.c b/crmd/callbacks.c -index f646927..38fb30b 100644 ---- a/crmd/callbacks.c -+++ b/crmd/callbacks.c -@@ -126,6 +126,7 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d - case crm_status_nstate: - crm_info("%s is now %s (was %s)", - node->uname, state_text(node->state), state_text(data)); -+ - if (safe_str_eq(data, node->state)) { - /* State did not change */ - return; -@@ -147,7 +148,10 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d - } - } - } -+ -+ crmd_notify_node_event(node); - break; -+ - case crm_status_processes: - if (data) { - old = *(const uint32_t *)data; -diff --git a/crmd/control.c b/crmd/control.c -index f4add49..d92f46b 100644 ---- a/crmd/control.c -+++ b/crmd/control.c -@@ -873,28 +873,64 @@ do_recover(long long action, - - /* *INDENT-OFF* */ - pe_cluster_option crmd_opts[] = { -- /* name, old-name, validate, default, description */ -- { "dc-version", NULL, "string", NULL, "none", NULL, "Version of Pacemaker on the cluster's DC.", "Includes the hash which identifies the exact Mercurial changeset it was built from. Used for diagnostic purposes." }, -- { "cluster-infrastructure", NULL, "string", NULL, "heartbeat", NULL, "The messaging stack on which Pacemaker is currently running.", "Used for informational and diagnostic purposes." }, -- { XML_CONFIG_ATTR_DC_DEADTIME, "dc_deadtime", "time", NULL, "20s", &check_time, "How long to wait for a response from other nodes during startup.", "The \"correct\" value will depend on the speed/load of your network and the type of switches used." }, -+ /* name, old-name, validate, values, default, short description, long description */ -+ { "dc-version", NULL, "string", NULL, "none", NULL, -+ "Version of Pacemaker on the cluster's DC.", -+ "Includes the hash which identifies the exact changeset it was built from. Used for diagnostic purposes." -+ }, -+ { "cluster-infrastructure", NULL, "string", NULL, "heartbeat", NULL, -+ "The messaging stack on which Pacemaker is currently running.", -+ "Used for informational and diagnostic purposes." }, -+ { XML_CONFIG_ATTR_DC_DEADTIME, "dc_deadtime", "time", NULL, "20s", &check_time, -+ "How long to wait for a response from other nodes during startup.", -+ "The \"correct\" value will depend on the speed/load of your network and the type of switches used." -+ }, - { XML_CONFIG_ATTR_RECHECK, "cluster_recheck_interval", "time", -- "Zero disables polling. Positive values are an interval in seconds (unless other SI units are specified. eg. 5min)", "15min", &check_timer, -+ "Zero disables polling. Positive values are an interval in seconds (unless other SI units are specified. eg. 5min)", -+ "15min", &check_timer, - "Polling interval for time based changes to options, resource parameters and constraints.", - "The Cluster is primarily event driven, however the configuration can have elements that change based on time." -- " To ensure these changes take effect, we can optionally poll the cluster's status for changes." }, -+ " To ensure these changes take effect, we can optionally poll the cluster's status for changes." 
-+ }, -+ -+ { "notification-script", NULL, "string", NULL, "/dev/null", &check_script, -+ "Notification script to be called after significant cluster events", -+ "Full path to a script that will be invoked when resources start/stop/fail, fencing occurs or nodes join/leave the cluster.\n" -+ "Must exist on all nodes in the cluster." -+ }, -+ { "notification-target", NULL, "string", NULL, "", NULL, -+ "Destination for notifications (Optional)", -+ "Where should the supplied script send notifications to. Useful to avoid hard-coding this in the script." -+ }, -+ - { "load-threshold", NULL, "percentage", NULL, "80%", &check_utilization, - "The maximum amount of system resources that should be used by nodes in the cluster", - "The cluster will slow down its recovery process when the amount of system resources used" -- " (currently CPU) approaches this limit", }, -+ " (currently CPU) approaches this limit", -+ }, - { "node-action-limit", NULL, "integer", NULL, "0", &check_number, - "The maximum number of jobs that can be scheduled per node. Defaults to 2x cores"}, -- { XML_CONFIG_ATTR_ELECTION_FAIL, "election_timeout", "time", NULL, "2min", &check_timer, "*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug." }, -- { XML_CONFIG_ATTR_FORCE_QUIT, "shutdown_escalation", "time", NULL, "20min", &check_timer, "*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug." }, -- { "crmd-integration-timeout", NULL, "time", NULL, "3min", &check_timer, "*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug." }, -- { "crmd-finalization-timeout", NULL, "time", NULL, "30min", &check_timer, "*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug." }, -- { "crmd-transition-delay", NULL, "time", NULL, "0s", &check_timer, "*** Advanced Use Only ***\nEnabling this option will slow down cluster recovery under all conditions", "Delay cluster recovery for the configured interval to allow for additional/related events to occur.\nUseful if your configuration is sensitive to the order in which ping updates arrive." }, -+ { XML_CONFIG_ATTR_ELECTION_FAIL, "election_timeout", "time", NULL, "2min", &check_timer, -+ "*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug." -+ }, -+ { XML_CONFIG_ATTR_FORCE_QUIT, "shutdown_escalation", "time", NULL, "20min", &check_timer, -+ "*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug." -+ }, -+ { "crmd-integration-timeout", NULL, "time", NULL, "3min", &check_timer, -+ "*** Advanced Use Only ***.", "If need to adjust this value, it probably indicates the presence of a bug." -+ }, -+ { "crmd-finalization-timeout", NULL, "time", NULL, "30min", &check_timer, -+ "*** Advanced Use Only ***.", "If you need to adjust this value, it probably indicates the presence of a bug." -+ }, -+ { "crmd-transition-delay", NULL, "time", NULL, "0s", &check_timer, -+ "*** Advanced Use Only ***\n" -+ "Enabling this option will slow down cluster recovery under all conditions", -+ "Delay cluster recovery for the configured interval to allow for additional/related events to occur.\n" -+ "Useful if your configuration is sensitive to the order in which ping updates arrive." 
-+ }, - { "stonith-watchdog-timeout", NULL, "time", NULL, NULL, &check_timer, -- "How long to wait before we can assume nodes are safely down", NULL }, -+ "How long to wait before we can assume nodes are safely down", NULL -+ }, - { "no-quorum-policy", "no_quorum_policy", "enum", "stop, freeze, ignore, suicide", "stop", &check_quorum, NULL, NULL }, - - #if SUPPORT_PLUGIN -@@ -927,6 +963,7 @@ crmd_pref(GHashTable * options, const char *name) - static void - config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) - { -+ const char *script = NULL; - const char *value = NULL; - GHashTable *config_hash = NULL; - crm_time_t *now = crm_time_new(NULL); -@@ -955,6 +992,10 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void - - verify_crmd_options(config_hash); - -+ script = crmd_pref(config_hash, "notification-script"); -+ value = crmd_pref(config_hash, "notification-target"); -+ crmd_enable_notifications(script, value); -+ - value = crmd_pref(config_hash, XML_CONFIG_ATTR_DC_DEADTIME); - election_trigger->period_ms = crm_get_msec(value); - -diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h -index 78214bf..7e8c3e6 100644 ---- a/crmd/crmd_utils.h -+++ b/crmd/crmd_utils.h -@@ -21,6 +21,7 @@ - # include - # include - # include /* For CIB_OP_MODIFY */ -+# include "notify.h" - - # define CLIENT_EXIT_WAIT 30 - # define FAKE_TE_ID "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" -diff --git a/crmd/lrm.c b/crmd/lrm.c -index 418e7cf..48195e8 100644 ---- a/crmd/lrm.c -+++ b/crmd/lrm.c -@@ -2415,6 +2415,8 @@ process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op, struct recurr - free(prefix); - } - -+ crmd_notify_resource_op(lrm_state->node_name, op); -+ - if (op->rsc_deleted) { - crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key); - delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL); -diff --git a/crmd/notify.c b/crmd/notify.c -new file mode 100644 -index 0000000..980bfa6 ---- /dev/null -+++ b/crmd/notify.c -@@ -0,0 +1,188 @@ -+/* -+ * Copyright (C) 2015 Andrew Beekhof -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public -+ * License as published by the Free Software Foundation; either -+ * version 2 of the License, or (at your option) any later version. -+ * -+ * This software is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public -+ * License along with this library; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+#include -+#include -+#include -+#include "notify.h" -+ -+char *notify_script = NULL; -+char *notify_target = NULL; -+ -+ -+static const char *notify_keys[] = -+{ -+ "CRM_notify_recipient", -+ "CRM_notify_node", -+ "CRM_notify_rsc", -+ "CRM_notify_task", -+ "CRM_notify_interval", -+ "CRM_notify_desc", -+ "CRM_notify_status", -+ "CRM_notify_target_rc", -+ "CRM_notify_rc", -+ "CRM_notify_kind", -+ "CRM_notify_version", -+}; -+ -+ -+void -+crmd_enable_notifications(const char *script, const char *target) -+{ -+ free(notify_script); -+ notify_script = NULL; -+ -+ free(notify_target); -+ notify_target = NULL; -+ -+ if(safe_str_eq(script, "/dev/null")) { -+ crm_notice("Notifications disabled"); -+ return; -+ } -+ -+ notify_script = strdup(script); -+ notify_target = strdup(target); -+ crm_notice("Notifications enabled"); -+} -+ -+static void -+set_notify_key(const char *name, const char *cvalue, char *value) -+{ -+ int lpc; -+ bool found = 0; -+ -+ if(cvalue == NULL) { -+ cvalue = value; -+ } -+ -+ for(lpc = 0; lpc < DIMOF(notify_keys); lpc++) { -+ if(safe_str_eq(name, notify_keys[lpc])) { -+ found = 1; -+ crm_trace("Setting notify key %s = '%s'", name, cvalue); -+ setenv(name, cvalue, 1); -+ break; -+ } -+ } -+ -+ CRM_ASSERT(found != 0); -+ free(value); -+} -+ -+ -+static void -+send_notification(const char *kind) -+{ -+ int lpc; -+ pid_t pid; -+ -+ crm_debug("Sending '%s' notification to '%s' via '%s'", kind, notify_target, notify_script); -+ -+ set_notify_key("CRM_notify_recipient", notify_target, NULL); -+ set_notify_key("CRM_notify_kind", kind, NULL); -+ set_notify_key("CRM_notify_version", VERSION, NULL); -+ -+ pid = fork(); -+ if (pid == -1) { -+ crm_perror(LOG_ERR, "notification failed"); -+ } -+ -+ if (pid == 0) { -+ /* crm_debug("notification: I am the child. Executing the nofitication program."); */ -+ execl(notify_script, notify_script, NULL); -+ exit(EXIT_FAILURE); -+ -+ } else { -+ for(lpc = 0; lpc < DIMOF(notify_keys); lpc++) { -+ unsetenv(notify_keys[lpc]); -+ } -+ } -+} -+ -+void crmd_notify_node_event(crm_node_t *node) -+{ -+ if(notify_script == NULL) { -+ return; -+ } -+ -+ set_notify_key("CRM_notify_node", node->uname, NULL); -+ set_notify_key("CRM_notify_desc", node->state, NULL); -+ -+ send_notification("node"); -+} -+ -+void -+crmd_notify_fencing_op(stonith_event_t * e) -+{ -+ char *desc = NULL; -+ -+ if(notify_script) { -+ return; -+ } -+ -+ desc = crm_strdup_printf("Operation %s requested by %s for peer %s: %s (ref=%s)", -+ e->operation, e->origin, e->target, pcmk_strerror(e->result), -+ e->id); -+ -+ set_notify_key("CRM_notify_node", e->target, NULL); -+ set_notify_key("CRM_notify_task", e->operation, NULL); -+ set_notify_key("CRM_notify_desc", NULL, desc); -+ set_notify_key("CRM_notify_rc", NULL, crm_itoa(e->result)); -+ -+ send_notification("fencing"); -+} -+ -+void -+crmd_notify_resource_op(const char *node, lrmd_event_data_t * op) -+{ -+ int target_rc = 0; -+ -+ if(notify_script == NULL) { -+ return; -+ } -+ -+ target_rc = rsc_op_expected_rc(op); -+ if(op->interval == 0 && target_rc == op->rc && safe_str_eq(op->op_type, RSC_STATUS)) { -+ /* Leave it up to the script if they want to notify for -+ * 'failed' probes, only swallow ones for which the result was -+ * unexpected. 
-+ * -+ * Even if we find a resource running, it was probably because -+ * someone erased the status section. -+ */ -+ return; -+ } -+ -+ set_notify_key("CRM_notify_node", node, NULL); -+ -+ set_notify_key("CRM_notify_rsc", op->rsc_id, NULL); -+ set_notify_key("CRM_notify_task", op->op_type, NULL); -+ set_notify_key("CRM_notify_interval", NULL, crm_itoa(op->interval)); -+ -+ set_notify_key("CRM_notify_target_rc", NULL, crm_itoa(target_rc)); -+ set_notify_key("CRM_notify_status", NULL, crm_itoa(op->op_status)); -+ set_notify_key("CRM_notify_rc", NULL, crm_itoa(op->rc)); -+ -+ if(op->op_status == PCMK_LRM_OP_DONE) { -+ set_notify_key("CRM_notify_desc", services_ocf_exitcode_str(op->rc), NULL); -+ } else { -+ set_notify_key("CRM_notify_desc", services_lrm_status_str(op->op_status), NULL); -+ } -+ -+ send_notification("resource"); -+} -+ -diff --git a/crmd/notify.h b/crmd/notify.h -new file mode 100644 -index 0000000..4b138ea ---- /dev/null -+++ b/crmd/notify.h -@@ -0,0 +1,30 @@ -+/* -+ * Copyright (C) 2015 Andrew Beekhof -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public -+ * License as published by the Free Software Foundation; either -+ * version 2 of the License, or (at your option) any later version. -+ * -+ * This software is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public -+ * License along with this library; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+#ifndef CRMD_NOTIFY__H -+# define CRMD_NOTIFY__H -+ -+# include -+# include -+# include -+ -+void crmd_enable_notifications(const char *script, const char *target); -+void crmd_notify_node_event(crm_node_t *node); -+void crmd_notify_fencing_op(stonith_event_t * e); -+void crmd_notify_resource_op(const char *node, lrmd_event_data_t * op); -+ -+#endif -diff --git a/crmd/te_utils.c b/crmd/te_utils.c -index a1d29f6..22551ba 100644 ---- a/crmd/te_utils.c -+++ b/crmd/te_utils.c -@@ -124,6 +124,8 @@ tengine_stonith_notify(stonith_t * st, stonith_event_t * st_event) - return; - } - -+ crmd_notify_fencing_op(st_event); -+ - if (st_event->result == pcmk_ok && safe_str_eq("on", st_event->action)) { - crm_notice("%s was successfully unfenced by %s (at the request of %s)", - st_event->target, st_event->executioner ? st_event->executioner : "", st_event->origin); -diff --git a/cts/CIB.py b/cts/CIB.py -index 8fbba6c..cd3a6a1 100644 ---- a/cts/CIB.py -+++ b/cts/CIB.py -@@ -219,6 +219,8 @@ class CIB11(ConfigBase): - o["dc-deadtime"] = "5s" - o["no-quorum-policy"] = no_quorum - o["expected-quorum-votes"] = self.num_nodes -+ o["notification-script"] = "/var/lib/pacemaker/notify.sh" -+ o["notification-target"] = "/var/lib/pacemaker/notify.log" - - if self.CM.Env["DoBSC"] == 1: - o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!" 
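[Editor's note — the notify.c code quoted above hands event context to the handler through CRM_notify_* environment variables, then forks and exec()s the configured script; a later patch in this series (0016) replaces the raw fork with services_action_async(). The standalone sketch below shows only that export-and-exec pattern. The reduced key list and the blocking waitpid() are illustrative assumptions — the daemon itself does not wait — and the script path merely echoes the one set in the CIB.py hunk above.]

#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

static const char *keys[] = { "CRM_notify_kind", "CRM_notify_node",
                              "CRM_notify_desc", "CRM_notify_recipient" };

static void notify(const char *script, const char *kind,
                   const char *node, const char *desc, const char *recipient)
{
    pid_t pid;
    size_t i;

    /* Export the event context for the child to pick up. */
    setenv("CRM_notify_kind", kind, 1);
    setenv("CRM_notify_node", node, 1);
    setenv("CRM_notify_desc", desc, 1);
    setenv("CRM_notify_recipient", recipient, 1);

    pid = fork();
    if (pid == 0) {                  /* child: hand control to the script */
        execl(script, script, (char *) NULL);
        _exit(EXIT_FAILURE);         /* only reached if execl() fails */
    } else if (pid > 0) {
        waitpid(pid, NULL, 0);       /* sketch only: crmd does not block here */
    } else {
        perror("fork");
    }

    /* Keep the parent's environment clean between notifications. */
    for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
        unsetenv(keys[i]);
    }
}

int main(void)
{
    /* Path taken from the CIB.py hunk above; purely illustrative. */
    notify("/var/lib/pacemaker/notify.sh", "node", "node1", "lost",
           "/var/lib/pacemaker/notify.log");
    return 0;
}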
-diff --git a/extra/pcmk_notify_sample.sh b/extra/pcmk_notify_sample.sh -new file mode 100755 -index 0000000..83cf8e9 ---- /dev/null -+++ b/extra/pcmk_notify_sample.sh -@@ -0,0 +1,68 @@ -+#!/bin/bash -+# -+# Copyright (C) 2015 Andrew Beekhof -+# -+# This program is free software; you can redistribute it and/or -+# modify it under the terms of the GNU General Public -+# License as published by the Free Software Foundation; either -+# version 2 of the License, or (at your option) any later version. -+# -+# This software is distributed in the hope that it will be useful, -+# but WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+# General Public License for more details. -+# -+# You should have received a copy of the GNU General Public -+# License along with this library; if not, write to the Free Software -+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ -+if [ -z $CRM_notify_version ]; then -+ echo "Pacemaker version 1.1.14 is required" >> ${CRM_notify_recipient} -+ exit 0 -+fi -+ -+case $CRM_notify_kind in -+ node) -+ echo "Node '${CRM_notify_node}' is now '${CRM_notify_desc}'" >> ${CRM_notify_recipient} -+ ;; -+ fencing) -+ # Other keys: -+ # -+ # CRM_notify_node -+ # CRM_notify_task -+ # CRM_notify_rc -+ # -+ echo "Fencing ${CRM_notify_desc}" >> ${CRM_notify_recipient} -+ ;; -+ resource) -+ # Other keys: -+ # -+ # CRM_notify_target_rc -+ # CRM_notify_status -+ # CRM_notify_rc -+ # -+ if [ ${CRM_notify_interval} = "0" ]; then -+ CRM_notify_interval="" -+ else -+ CRM_notify_interval=" (${CRM_notify_interval})" -+ fi -+ -+ if [ ${CRM_notify_target_rc} = "0" ]; then -+ CRM_notify_target_rc="" -+ else -+ CRM_notify_target_rc=" (target: ${CRM_notify_target_rc})" -+ fi -+ -+ case ${CRM_notify_desc} in -+ Cancelled) ;; -+ *) -+ echo "Resource operation '${CRM_notify_task}${CRM_notify_interval}' for '${CRM_notify_rsc}' on '${CRM_notify_node}': ${CRM_notify_desc}${CRM_notify_target_rc}" >> ${CRM_notify_recipient} -+ ;; -+ esac -+ ;; -+ *) -+ echo "Unhandled $CRM_notify_kind notification" >> ${CRM_notify_recipient} -+ env | grep CRM_notify >> ${CRM_notify_recipient} -+ ;; -+ -+esac -diff --git a/include/crm_internal.h b/include/crm_internal.h -index c13bc7b..fb03537 100644 ---- a/include/crm_internal.h -+++ b/include/crm_internal.h -@@ -127,6 +127,7 @@ gboolean check_timer(const char *value); - gboolean check_boolean(const char *value); - gboolean check_number(const char *value); - gboolean check_quorum(const char *value); -+gboolean check_script(const char *value); - gboolean check_utilization(const char *value); - - /* Shared PE/crmd functionality */ -diff --git a/lib/common/utils.c b/lib/common/utils.c -index 6a234dc..628cf2f 100644 ---- a/lib/common/utils.c -+++ b/lib/common/utils.c -@@ -180,6 +180,33 @@ check_quorum(const char *value) - } - - gboolean -+check_script(const char *value) -+{ -+ struct stat st; -+ -+ if(safe_str_eq(value, "/dev/null")) { -+ return TRUE; -+ } -+ -+ if(stat(value, &st) != 0) { -+ crm_err("Script %s does not exist", value); -+ return FALSE; -+ } -+ -+ if(S_ISREG(st.st_mode) == 0) { -+ crm_err("Script %s is not a regular file", value); -+ return FALSE; -+ } -+ -+ if( (st.st_mode & (S_IXUSR | S_IXGRP )) == 0) { -+ crm_err("Script %s is not executable", value); -+ return FALSE; -+ } -+ -+ return TRUE; -+} -+ -+gboolean - check_utilization(const char *value) - { - char *end = NULL; diff --git a/SOURCES/0013-Fix-cman-Suppress-implied-node-names.patch 
b/SOURCES/0013-Fix-cman-Suppress-implied-node-names.patch deleted file mode 100644 index eb14b0d..0000000 --- a/SOURCES/0013-Fix-cman-Suppress-implied-node-names.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Andrew Beekhof -Date: Wed, 2 Sep 2015 12:08:52 +1000 -Subject: [PATCH] Fix: cman: Suppress implied node names - -(cherry picked from commit e94fbcd0c49db9d3c69b7c0e478ba89a4d360dde) ---- - tools/crm_node.c | 20 +++++++++++++++++++- - 1 file changed, 19 insertions(+), 1 deletion(-) - -diff --git a/tools/crm_node.c b/tools/crm_node.c -index d0195e3..24cc4d7 100644 ---- a/tools/crm_node.c -+++ b/tools/crm_node.c -@@ -434,6 +434,21 @@ try_heartbeat(int command, enum cluster_type_e stack) - #if SUPPORT_CMAN - # include - # define MAX_NODES 256 -+static bool valid_cman_name(const char *name, uint32_t nodeid) -+{ -+ bool rc = TRUE; -+ -+ /* Yes, %d, because that's what CMAN does */ -+ char *fakename = crm_strdup_printf("Node%d", nodeid); -+ -+ if(crm_str_eq(fakename, name, TRUE)) { -+ rc = FALSE; -+ crm_notice("Ignoring inferred name from cman: %s", fakename); -+ } -+ free(fakename); -+ return rc; -+} -+ - static gboolean - try_cman(int command, enum cluster_type_e stack) - { -@@ -478,7 +493,10 @@ try_cman(int command, enum cluster_type_e stack) - } - - for (lpc = 0; lpc < node_count; lpc++) { -- if (command == 'l') { -+ if(valid_cman_name(cman_nodes[lpc].cn_name, cman_nodes[lpc].cn_nodeid) == FALSE) { -+ /* Do not print */ -+ -+ } if (command == 'l') { - printf("%s ", cman_nodes[lpc].cn_name); - - } else if (cman_nodes[lpc].cn_nodeid != 0 && cman_nodes[lpc].cn_member) { diff --git a/SOURCES/0014-Fix-crmd-Choose-more-appropriate-names-for-notificat.patch b/SOURCES/0014-Fix-crmd-Choose-more-appropriate-names-for-notificat.patch deleted file mode 100644 index 2a12849..0000000 --- a/SOURCES/0014-Fix-crmd-Choose-more-appropriate-names-for-notificat.patch +++ /dev/null @@ -1,58 +0,0 @@ -From: Andrew Beekhof -Date: Wed, 2 Sep 2015 14:32:40 +1000 -Subject: [PATCH] Fix: crmd: Choose more appropriate names for notification - options - -(cherry picked from commit 8971ef024ffebf3d0240b30e620697a7b58232c4) ---- - crmd/control.c | 12 ++++++------ - cts/CIB.py | 4 ++-- - 2 files changed, 8 insertions(+), 8 deletions(-) - -diff --git a/crmd/control.c b/crmd/control.c -index d92f46b..d1f9acd 100644 ---- a/crmd/control.c -+++ b/crmd/control.c -@@ -893,12 +893,12 @@ pe_cluster_option crmd_opts[] = { - " To ensure these changes take effect, we can optionally poll the cluster's status for changes." - }, - -- { "notification-script", NULL, "string", NULL, "/dev/null", &check_script, -- "Notification script to be called after significant cluster events", -- "Full path to a script that will be invoked when resources start/stop/fail, fencing occurs or nodes join/leave the cluster.\n" -+ { "notification-agent", NULL, "string", NULL, "/dev/null", &check_script, -+ "Notification script or tool to be called after significant cluster events", -+ "Full path to a script or binary that will be invoked when resources start/stop/fail, fencing occurs or nodes join/leave the cluster.\n" - "Must exist on all nodes in the cluster." - }, -- { "notification-target", NULL, "string", NULL, "", NULL, -+ { "notification-recipient", NULL, "string", NULL, "", NULL, - "Destination for notifications (Optional)", - "Where should the supplied script send notifications to. Useful to avoid hard-coding this in the script." 
- }, -@@ -992,8 +992,8 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void - - verify_crmd_options(config_hash); - -- script = crmd_pref(config_hash, "notification-script"); -- value = crmd_pref(config_hash, "notification-target"); -+ script = crmd_pref(config_hash, "notification-agent"); -+ value = crmd_pref(config_hash, "notification-recipient"); - crmd_enable_notifications(script, value); - - value = crmd_pref(config_hash, XML_CONFIG_ATTR_DC_DEADTIME); -diff --git a/cts/CIB.py b/cts/CIB.py -index cd3a6a1..0933ccd 100644 ---- a/cts/CIB.py -+++ b/cts/CIB.py -@@ -219,8 +219,8 @@ class CIB11(ConfigBase): - o["dc-deadtime"] = "5s" - o["no-quorum-policy"] = no_quorum - o["expected-quorum-votes"] = self.num_nodes -- o["notification-script"] = "/var/lib/pacemaker/notify.sh" -- o["notification-target"] = "/var/lib/pacemaker/notify.log" -+ o["notification-agent"] = "/var/lib/pacemaker/notify.sh" -+ o["notification-recipient"] = "/var/lib/pacemaker/notify.log" - - if self.CM.Env["DoBSC"] == 1: - o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!" diff --git a/SOURCES/0015-Fix-crmd-Correctly-enable-disable-notifications.patch b/SOURCES/0015-Fix-crmd-Correctly-enable-disable-notifications.patch deleted file mode 100644 index 575f6ea..0000000 --- a/SOURCES/0015-Fix-crmd-Correctly-enable-disable-notifications.patch +++ /dev/null @@ -1,22 +0,0 @@ -From: Andrew Beekhof -Date: Wed, 2 Sep 2015 14:48:17 +1000 -Subject: [PATCH] Fix: crmd: Correctly enable/disable notifications - -(cherry picked from commit 7368cf120cd5ee848d2bdcd788497a3b89616b05) ---- - crmd/notify.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/crmd/notify.c b/crmd/notify.c -index 980bfa6..ccf5ea8 100644 ---- a/crmd/notify.c -+++ b/crmd/notify.c -@@ -50,7 +50,7 @@ crmd_enable_notifications(const char *script, const char *target) - free(notify_target); - notify_target = NULL; - -- if(safe_str_eq(script, "/dev/null")) { -+ if(script == NULL || safe_str_eq(script, "/dev/null")) { - crm_notice("Notifications disabled"); - return; - } diff --git a/SOURCES/0016-Fix-crmd-Report-the-completion-status-and-output-of-.patch b/SOURCES/0016-Fix-crmd-Report-the-completion-status-and-output-of-.patch deleted file mode 100644 index e7bc0e3..0000000 --- a/SOURCES/0016-Fix-crmd-Report-the-completion-status-and-output-of-.patch +++ /dev/null @@ -1,109 +0,0 @@ -From: Andrew Beekhof -Date: Wed, 2 Sep 2015 14:34:04 +1000 -Subject: [PATCH] Fix: crmd: Report the completion status and output of - notifications - -(cherry picked from commit 0c303d8a6f9f9a9dbec9f6d2e9e799fe335f8eaa) ---- - crmd/notify.c | 37 ++++++++++++++++++++++++------------- - lib/services/services.c | 4 ++-- - 2 files changed, 26 insertions(+), 15 deletions(-) - -diff --git a/crmd/notify.c b/crmd/notify.c -index ccf5ea8..ca2be0f 100644 ---- a/crmd/notify.c -+++ b/crmd/notify.c -@@ -29,6 +29,7 @@ static const char *notify_keys[] = - { - "CRM_notify_recipient", - "CRM_notify_node", -+ "CRM_notify_nodeid", - "CRM_notify_rsc", - "CRM_notify_task", - "CRM_notify_interval", -@@ -83,12 +84,21 @@ set_notify_key(const char *name, const char *cvalue, char *value) - free(value); - } - -+static void crmd_notify_complete(svc_action_t *op) -+{ -+ if(op->rc == 0) { -+ crm_info("Notification %d (%s) complete", op->sequence, op->agent); -+ } else { -+ crm_warn("Notification %d (%s) failed: %d", op->sequence, op->agent, op->rc); -+ } -+} - - static void - send_notification(const char *kind) - { - int lpc; -- pid_t pid; -+ svc_action_t 
*notify = NULL; -+ static int operations = 0; - - crm_debug("Sending '%s' notification to '%s' via '%s'", kind, notify_target, notify_script); - -@@ -96,20 +106,20 @@ send_notification(const char *kind) - set_notify_key("CRM_notify_kind", kind, NULL); - set_notify_key("CRM_notify_version", VERSION, NULL); - -- pid = fork(); -- if (pid == -1) { -- crm_perror(LOG_ERR, "notification failed"); -- } -+ notify = services_action_create_generic(notify_script, NULL); - -- if (pid == 0) { -- /* crm_debug("notification: I am the child. Executing the nofitication program."); */ -- execl(notify_script, notify_script, NULL); -- exit(EXIT_FAILURE); -+ notify->timeout = 300; -+ notify->standard = strdup("event"); -+ notify->id = strdup(notify_script); -+ notify->agent = strdup(notify_script); -+ notify->sequence = ++operations; - -- } else { -- for(lpc = 0; lpc < DIMOF(notify_keys); lpc++) { -- unsetenv(notify_keys[lpc]); -- } -+ if(services_action_async(notify, &crmd_notify_complete) == FALSE) { -+ services_action_free(notify); -+ } -+ -+ for(lpc = 0; lpc < DIMOF(notify_keys); lpc++) { -+ unsetenv(notify_keys[lpc]); - } - } - -@@ -120,6 +130,7 @@ void crmd_notify_node_event(crm_node_t *node) - } - - set_notify_key("CRM_notify_node", node->uname, NULL); -+ set_notify_key("CRM_notify_nodeid", NULL, crm_itoa(node->id)); - set_notify_key("CRM_notify_desc", node->state, NULL); - - send_notification("node"); -diff --git a/lib/services/services.c b/lib/services/services.c -index abf1458..4609a7d 100644 ---- a/lib/services/services.c -+++ b/lib/services/services.c -@@ -598,7 +598,7 @@ action_async_helper(svc_action_t * op) { - } - - /* keep track of ops that are in-flight to avoid collisions in the same namespace */ -- if (res) { -+ if (op->rsc && res) { - inflight_ops = g_list_append(inflight_ops, op); - } - -@@ -622,7 +622,7 @@ services_action_async(svc_action_t * op, void (*action_callback) (svc_action_t * - g_hash_table_replace(recurring_actions, op->id, op); - } - -- if (is_op_blocked(op->rsc)) { -+ if (op->rsc && is_op_blocked(op->rsc)) { - blocked_ops = g_list_append(blocked_ops, op); - return TRUE; - } diff --git a/SOURCES/0017-Fix-cman-Print-the-nodeid-of-nodes-with-fake-names.patch b/SOURCES/0017-Fix-cman-Print-the-nodeid-of-nodes-with-fake-names.patch deleted file mode 100644 index b627349..0000000 --- a/SOURCES/0017-Fix-cman-Print-the-nodeid-of-nodes-with-fake-names.patch +++ /dev/null @@ -1,23 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 3 Sep 2015 10:58:59 +1000 -Subject: [PATCH] Fix: cman: Print the nodeid of nodes with fake names - -(cherry picked from commit dd9a379408aa43b89c81d31ce7efa60b2e77f593) ---- - tools/crm_node.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/tools/crm_node.c b/tools/crm_node.c -index 24cc4d7..ed02ee7 100644 ---- a/tools/crm_node.c -+++ b/tools/crm_node.c -@@ -494,7 +494,8 @@ try_cman(int command, enum cluster_type_e stack) - - for (lpc = 0; lpc < node_count; lpc++) { - if(valid_cman_name(cman_nodes[lpc].cn_name, cman_nodes[lpc].cn_nodeid) == FALSE) { -- /* Do not print */ -+ /* The name was invented, but we need to print something, make it the id instead */ -+ printf("%u ", cman_nodes[lpc].cn_nodeid); - - } if (command == 'l') { - printf("%s ", cman_nodes[lpc].cn_name); diff --git a/SOURCES/0018-Refactor-Tools-Isolate-the-paths-which-truely-requir.patch b/SOURCES/0018-Refactor-Tools-Isolate-the-paths-which-truely-requir.patch deleted file mode 100644 index 2fbd35e..0000000 --- 
a/SOURCES/0018-Refactor-Tools-Isolate-the-paths-which-truely-requir.patch +++ /dev/null @@ -1,299 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 3 Sep 2015 11:36:21 +1000 -Subject: [PATCH] Refactor: Tools: Isolate the paths which truely require - corosync-2.x - -(cherry picked from commit 32c05b99f6a3e953668dcda71ce24e03927d83cb) ---- - tools/crm_node.c | 243 +++++++++++++++++++++++++++++++------------------------ - 1 file changed, 139 insertions(+), 104 deletions(-) - -diff --git a/tools/crm_node.c b/tools/crm_node.c -index ed02ee7..308d4f9 100644 ---- a/tools/crm_node.c -+++ b/tools/crm_node.c -@@ -60,6 +60,9 @@ static struct crm_option long_options[] = { - #if SUPPORT_COROSYNC - {"openais", 0, 0, 'A', "\tOnly try connecting to an OpenAIS-based cluster"}, - #endif -+#ifdef SUPPORT_CS_QUORUM -+ {"corosync", 0, 0, 'C', "\tOnly try connecting to an Corosync-based cluster"}, -+#endif - #ifdef SUPPORT_HEARTBEAT - {"heartbeat", 0, 0, 'H', "Only try connecting to a Heartbeat-based cluster"}, - #endif -@@ -223,6 +226,138 @@ int tools_remove_node_cache(const char *node, const char *target) - return rc > 0 ? 0 : rc; - } - -+static gint -+compare_node_uname(gconstpointer a, gconstpointer b) -+{ -+ const crm_node_t *a_node = a; -+ const crm_node_t *b_node = b; -+ return strcmp(a_node->uname?a_node->uname:"", b_node->uname?b_node->uname:""); -+} -+ -+static int -+node_mcp_dispatch(const char *buffer, ssize_t length, gpointer userdata) -+{ -+ xmlNode *msg = string2xml(buffer); -+ -+ if (msg) { -+ xmlNode *node = NULL; -+ GListPtr nodes = NULL; -+ GListPtr iter = NULL; -+ -+ crm_log_xml_trace(msg, "message"); -+ -+ for (node = __xml_first_child(msg); node != NULL; node = __xml_next(node)) { -+ crm_node_t *peer = calloc(1, sizeof(crm_node_t)); -+ -+ nodes = g_list_insert_sorted(nodes, peer, compare_node_uname); -+ peer->uname = (char*)crm_element_value_copy(node, "uname"); -+ peer->state = (char*)crm_element_value_copy(node, "state"); -+ crm_element_value_int(node, "id", (int*)&peer->id); -+ } -+ -+ for(iter = nodes; iter; iter = iter->next) { -+ crm_node_t *peer = iter->data; -+ if (command == 'l') { -+ fprintf(stdout, "%u %s %s\n", peer->id, peer->uname, peer->state); -+ -+ } else if (command == 'p') { -+ if(safe_str_eq(peer->state, CRM_NODE_MEMBER)) { -+ fprintf(stdout, "%s ", peer->uname); -+ } -+ -+ } else if (command == 'i') { -+ if(safe_str_eq(peer->state, CRM_NODE_MEMBER)) { -+ fprintf(stdout, "%u ", peer->id); -+ } -+ } -+ } -+ -+ g_list_free_full(nodes, free); -+ free_xml(msg); -+ -+ if (command == 'p') { -+ fprintf(stdout, "\n"); -+ } -+ -+ crm_exit(pcmk_ok); -+ } -+ -+ return 0; -+} -+ -+static void -+node_mcp_destroy(gpointer user_data) -+{ -+ crm_exit(ENOTCONN); -+} -+ -+static gboolean -+try_pacemaker(int command, enum cluster_type_e stack) -+{ -+ struct ipc_client_callbacks node_callbacks = { -+ .dispatch = node_mcp_dispatch, -+ .destroy = node_mcp_destroy -+ }; -+ -+ if (stack == pcmk_cluster_heartbeat) { -+ /* Nothing to do for them */ -+ return FALSE; -+ } -+ -+ switch (command) { -+ case 'e': -+ /* Age only applies to heartbeat clusters */ -+ fprintf(stdout, "1\n"); -+ crm_exit(pcmk_ok); -+ -+ case 'q': -+ /* Implement one day? 
-+ * Wouldn't be much for pacemakerd to track it and include in the poke reply -+ */ -+ return FALSE; -+ -+ case 'R': -+ { -+ int lpc = 0; -+ const char *daemons[] = { -+ CRM_SYSTEM_CRMD, -+ "stonith-ng", -+ T_ATTRD, -+ CRM_SYSTEM_MCP, -+ }; -+ -+ for(lpc = 0; lpc < DIMOF(daemons); lpc++) { -+ if (tools_remove_node_cache(target_uname, daemons[lpc])) { -+ crm_err("Failed to connect to %s to remove node '%s'", daemons[lpc], target_uname); -+ crm_exit(pcmk_err_generic); -+ } -+ } -+ crm_exit(pcmk_ok); -+ } -+ break; -+ -+ case 'i': -+ case 'l': -+ case 'p': -+ /* Go to pacemakerd */ -+ { -+ GMainLoop *amainloop = g_main_new(FALSE); -+ mainloop_io_t *ipc = -+ mainloop_add_ipc_client(CRM_SYSTEM_MCP, G_PRIORITY_DEFAULT, 0, NULL, &node_callbacks); -+ if (ipc != NULL) { -+ /* Sending anything will get us a list of nodes */ -+ xmlNode *poke = create_xml_node(NULL, "poke"); -+ -+ crm_ipc_send(mainloop_get_ipc_client(ipc), poke, 0, 0, NULL); -+ free_xml(poke); -+ g_main_run(amainloop); -+ } -+ } -+ break; -+ } -+ return FALSE; -+} -+ - #if SUPPORT_HEARTBEAT - # include - # include -@@ -626,66 +761,6 @@ ais_membership_dispatch(cpg_handle_t handle, - # include - # include - --static gint --compare_node_uname(gconstpointer a, gconstpointer b) --{ -- const crm_node_t *a_node = a; -- const crm_node_t *b_node = b; -- return strcmp(a_node->uname?a_node->uname:"", b_node->uname?b_node->uname:""); --} -- --static int --node_mcp_dispatch(const char *buffer, ssize_t length, gpointer userdata) --{ -- xmlNode *msg = string2xml(buffer); -- -- if (msg) { -- xmlNode *node = NULL; -- GListPtr nodes = NULL; -- GListPtr iter = NULL; -- -- crm_log_xml_trace(msg, "message"); -- -- for (node = __xml_first_child(msg); node != NULL; node = __xml_next(node)) { -- crm_node_t *peer = calloc(1, sizeof(crm_node_t)); -- -- nodes = g_list_insert_sorted(nodes, peer, compare_node_uname); -- peer->uname = (char*)crm_element_value_copy(node, "uname"); -- peer->state = (char*)crm_element_value_copy(node, "state"); -- crm_element_value_int(node, "id", (int*)&peer->id); -- } -- -- for(iter = nodes; iter; iter = iter->next) { -- crm_node_t *peer = iter->data; -- if (command == 'l') { -- fprintf(stdout, "%u %s\n", peer->id, peer->uname); -- -- } else if (command == 'p') { -- if(safe_str_eq(peer->state, CRM_NODE_MEMBER)) { -- fprintf(stdout, "%s ", peer->uname); -- } -- } -- } -- -- g_list_free_full(nodes, free); -- free_xml(msg); -- -- if (command == 'p') { -- fprintf(stdout, "\n"); -- } -- -- crm_exit(pcmk_ok); -- } -- -- return 0; --} -- --static void --node_mcp_destroy(gpointer user_data) --{ -- crm_exit(ENOTCONN); --} -- - static gboolean - try_corosync(int command, enum cluster_type_e stack) - { -@@ -696,36 +771,7 @@ try_corosync(int command, enum cluster_type_e stack) - cpg_handle_t c_handle = 0; - quorum_handle_t q_handle = 0; - -- mainloop_io_t *ipc = NULL; -- GMainLoop *amainloop = NULL; -- const char *daemons[] = { -- CRM_SYSTEM_CRMD, -- "stonith-ng", -- T_ATTRD, -- CRM_SYSTEM_MCP, -- }; -- -- struct ipc_client_callbacks node_callbacks = { -- .dispatch = node_mcp_dispatch, -- .destroy = node_mcp_destroy -- }; -- - switch (command) { -- case 'R': -- for(rc = 0; rc < DIMOF(daemons); rc++) { -- if (tools_remove_node_cache(target_uname, daemons[rc])) { -- crm_err("Failed to connect to %s to remove node '%s'", daemons[rc], target_uname); -- crm_exit(pcmk_err_generic); -- } -- } -- crm_exit(pcmk_ok); -- break; -- -- case 'e': -- /* Age makes no sense (yet) in an AIS cluster */ -- fprintf(stdout, "1\n"); -- crm_exit(pcmk_ok); -- - case 
'q': - /* Go direct to the Quorum API */ - rc = quorum_initialize(&q_handle, NULL, &quorum_type); -@@ -766,21 +812,8 @@ try_corosync(int command, enum cluster_type_e stack) - cpg_finalize(c_handle); - crm_exit(pcmk_ok); - -- case 'l': -- case 'p': -- /* Go to pacemakerd */ -- amainloop = g_main_new(FALSE); -- ipc = -- mainloop_add_ipc_client(CRM_SYSTEM_MCP, G_PRIORITY_DEFAULT, 0, NULL, -- &node_callbacks); -- if (ipc != NULL) { -- /* Sending anything will get us a list of nodes */ -- xmlNode *poke = create_xml_node(NULL, "poke"); -- -- crm_ipc_send(mainloop_get_ipc_client(ipc), poke, 0, 0, NULL); -- free_xml(poke); -- g_main_run(amainloop); -- } -+ default: -+ try_pacemaker(command, stack); - break; - } - return FALSE; -@@ -963,5 +996,7 @@ main(int argc, char **argv) - } - #endif - -+ try_pacemaker(command, try_stack); -+ - return (1); - } diff --git a/SOURCES/0019-Fix-corosync-Display-node-state-and-quorum-data-if-a.patch b/SOURCES/0019-Fix-corosync-Display-node-state-and-quorum-data-if-a.patch deleted file mode 100644 index b7822e3..0000000 --- a/SOURCES/0019-Fix-corosync-Display-node-state-and-quorum-data-if-a.patch +++ /dev/null @@ -1,94 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 3 Sep 2015 12:27:59 +1000 -Subject: [PATCH] Fix: corosync: Display node state and quorum data if - available - -(cherry picked from commit 4d4c92e515bbaf74917a311e19d5995b30c29430) ---- - mcp/pacemaker.c | 7 +++++++ - tools/crm_node.c | 17 ++++++++++------- - 2 files changed, 17 insertions(+), 7 deletions(-) - -diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c -index f9fc015..9c3195e 100644 ---- a/mcp/pacemaker.c -+++ b/mcp/pacemaker.c -@@ -35,6 +35,8 @@ - - #include - #include -+ -+gboolean pcmk_quorate = FALSE; - gboolean fatal_error = FALSE; - GMainLoop *mainloop = NULL; - -@@ -560,6 +562,10 @@ update_process_clients(crm_client_t *client) - crm_node_t *node = NULL; - xmlNode *update = create_xml_node(NULL, "nodes"); - -+ if (is_corosync_cluster()) { -+ crm_xml_add_int(update, "quorate", pcmk_quorate); -+ } -+ - g_hash_table_iter_init(&iter, crm_peer_cache); - while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) { - xmlNode *xml = create_xml_node(update, "node"); -@@ -896,6 +902,7 @@ static gboolean - mcp_quorum_callback(unsigned long long seq, gboolean quorate) - { - /* Nothing to do */ -+ pcmk_quorate = quorate; - return TRUE; - } - -diff --git a/tools/crm_node.c b/tools/crm_node.c -index 308d4f9..9626120 100644 ---- a/tools/crm_node.c -+++ b/tools/crm_node.c -@@ -243,8 +243,16 @@ node_mcp_dispatch(const char *buffer, ssize_t length, gpointer userdata) - xmlNode *node = NULL; - GListPtr nodes = NULL; - GListPtr iter = NULL; -+ const char *quorate = crm_element_value(msg, "quorate"); - - crm_log_xml_trace(msg, "message"); -+ if (command == 'q' && quorate != NULL) { -+ fprintf(stdout, "%s\n", quorate); -+ crm_exit(pcmk_ok); -+ -+ } else if(command == 'q') { -+ crm_exit(1); -+ } - - for (node = __xml_first_child(msg); node != NULL; node = __xml_next(node)) { - crm_node_t *peer = calloc(1, sizeof(crm_node_t)); -@@ -258,7 +266,7 @@ node_mcp_dispatch(const char *buffer, ssize_t length, gpointer userdata) - for(iter = nodes; iter; iter = iter->next) { - crm_node_t *peer = iter->data; - if (command == 'l') { -- fprintf(stdout, "%u %s %s\n", peer->id, peer->uname, peer->state); -+ fprintf(stdout, "%u %s %s\n", peer->id, peer->uname, peer->state?peer->state:""); - - } else if (command == 'p') { - if(safe_str_eq(peer->state, CRM_NODE_MEMBER)) { -@@ -310,12 +318,6 @@ try_pacemaker(int command, enum 
cluster_type_e stack) - fprintf(stdout, "1\n"); - crm_exit(pcmk_ok); - -- case 'q': -- /* Implement one day? -- * Wouldn't be much for pacemakerd to track it and include in the poke reply -- */ -- return FALSE; -- - case 'R': - { - int lpc = 0; -@@ -338,6 +340,7 @@ try_pacemaker(int command, enum cluster_type_e stack) - - case 'i': - case 'l': -+ case 'q': - case 'p': - /* Go to pacemakerd */ - { diff --git a/SOURCES/002-build-cleanup.patch b/SOURCES/002-build-cleanup.patch new file mode 100644 index 0000000..6fe5701 --- /dev/null +++ b/SOURCES/002-build-cleanup.patch @@ -0,0 +1,678 @@ +From f4305372f80faafb5a1856f68b06c4dd87f8f521 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 17 Jun 2016 21:47:34 +0200 +Subject: [PATCH 1/3] Build: drop acinclude.m4 never ever used by pacemaker + proper + +It was rather used by the management tool being part of heartbeat +project -- pacemaker's precursor -- hence the shared codebase that +just wasn't split carefully. + +[note that hearbeat also mistakenly carries that file along while + the reason disappeared as of: + http://hg.linux-ha.org/heartbeat-STABLE_3_0/rev/cf17a3d4167b#l1.39] +--- + acinclude.m4 | 39 --------------------------------------- + 1 file changed, 39 deletions(-) + delete mode 100644 acinclude.m4 + +diff --git a/acinclude.m4 b/acinclude.m4 +deleted file mode 100644 +index fa8fef2..0000000 +--- a/acinclude.m4 ++++ /dev/null +@@ -1,39 +0,0 @@ +-dnl +-dnl local autoconf/automake macros needed for heartbeat +-dnl Started by David Lee February 2006 +-dnl +-dnl License: GNU General Public License (GPL) +- +- +-dnl AM_CHECK_PYTHON_HEADERS: Find location of python include files. +-dnl Taken from: +-dnl http://source.macgimp.org/ +-dnl which is GPL and is attributed to James Henstridge. +-dnl +-dnl AM_CHECK_PYTHON_HEADERS([ACTION-IF-POSSIBLE], [ACTION-IF-NOT-POSSIBLE]) +-dnl Imports: +-dnl $PYTHON +-dnl Exports: +-dnl PYTHON_INCLUDES +- +-AC_DEFUN([AM_CHECK_PYTHON_HEADERS], +-[AC_REQUIRE([AM_PATH_PYTHON]) +-AC_MSG_CHECKING(for headers required to compile python extensions) +-dnl deduce PYTHON_INCLUDES +-py_prefix=`$PYTHON -c "import sys; print sys.prefix"` +-py_exec_prefix=`$PYTHON -c "import sys; print sys.exec_prefix"` +-PYTHON_INCLUDES="-I${py_prefix}/include/python${PYTHON_VERSION}" +-if test "$py_prefix" != "$py_exec_prefix"; then +- PYTHON_INCLUDES="$PYTHON_INCLUDES -I${py_exec_prefix}/include/python${PYTHON_VERSION}" +-fi +-AC_SUBST(PYTHON_INCLUDES) +-dnl check if the headers exist: +-save_CPPFLAGS="$CPPFLAGS" +-CPPFLAGS="$CPPFLAGS $PYTHON_INCLUDES" +-AC_TRY_CPP([#include ],dnl +-[AC_MSG_RESULT(found) +-$1],dnl +-[AC_MSG_RESULT(not found) +-$2]) +-CPPFLAGS="$save_CPPFLAGS" +-]) +-- +1.8.3.1 + + +From 4e0d5c81dae95be2bd3598d2f0dd639b64e5486a Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 17 Jun 2016 22:59:42 +0200 +Subject: [PATCH 2/3] Build: configure.ac: drop unused CC_ERRORS, move + CC_EXTRAS + +... so as not to delimit the comment and respective code with unrelated +stuff. +--- + configure.ac | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 1a393fc..c5b30dc 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1703,10 +1703,6 @@ dnl not have CFLAGS in their environment then this should have + dnl no effect. However if CFLAGS was exported from the user's + dnl environment, then the new CFLAGS will also be exported + dnl to sub processes. 
+- +-CC_ERRORS="" +-CC_EXTRAS="" +- + if export | fgrep " CFLAGS=" > /dev/null; then + SAVED_CFLAGS="$CFLAGS" + unset CFLAGS +@@ -1714,6 +1710,8 @@ if export | fgrep " CFLAGS=" > /dev/null; then + unset SAVED_CFLAGS + fi + ++CC_EXTRAS="" ++ + if test "$GCC" != yes; then + CFLAGS="$CFLAGS -g" + enable_fatal_warnings=no +-- +1.8.3.1 + + +From 9632cd73b758dd4d41c2dbf2b9f10679cc3ee1a2 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Tue, 21 Jun 2016 19:05:52 +0200 +Subject: [PATCH 3/3] Refactor: sanitizing touch on makefiles about to receive + hardening + +* whitespace cleanup +* internally provided -D... and -I... switches belong to CPPFLAGS rather + than CFLAGS +* use a following order of the per-target flags: + - CPPFLAGS + - YFLAGS + - CFLAGS + - LDFLAGS + - LDADD/LIBADD + - SOURCES + +This is in part to reflect common conditional inclusion of additional +sources (which should preferably immediately follow the main SOURCES +definition) sometimes connected with extending other flags as well. +--- + attrd/Makefile.am | 12 ++++++------ + cib/Makefile.am | 18 +++++++++--------- + crmd/Makefile.am | 31 +++++++++++++++---------------- + fencing/Makefile.am | 30 ++++++++++++++++-------------- + lib/cib/Makefile.am | 9 +++++---- + lib/cluster/Makefile.am | 3 +-- + lib/common/Makefile.am | 8 ++++---- + lib/fencing/Makefile.am | 8 ++++---- + lib/lrmd/Makefile.am | 10 +++++----- + lib/pengine/Makefile.am | 12 ++++++------ + lib/services/Makefile.am | 24 ++++++++++++------------ + lib/transition/Makefile.am | 9 +++++---- + lrmd/Makefile.am | 46 ++++++++++++++++++++++------------------------ + mcp/Makefile.am | 4 ++-- + pengine/Makefile.am | 16 +++++++--------- + 15 files changed, 119 insertions(+), 121 deletions(-) + +diff --git a/attrd/Makefile.am b/attrd/Makefile.am +index 9a841e5..a116e0e 100644 +--- a/attrd/Makefile.am ++++ b/attrd/Makefile.am +@@ -17,16 +17,16 @@ + # + include $(top_srcdir)/Makefile.common + +-halibdir = $(CRM_DAEMON_DIR) +-halib_PROGRAMS = attrd ++halibdir = $(CRM_DAEMON_DIR) ++halib_PROGRAMS = attrd + ## SOURCES + +-attrd_SOURCES = +-attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ +- $(top_builddir)/lib/common/libcrmcommon.la \ +- $(top_builddir)/lib/cib/libcib.la \ ++attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ ++ $(top_builddir)/lib/common/libcrmcommon.la \ ++ $(top_builddir)/lib/cib/libcib.la \ + $(CLUSTERLIBS) + ++attrd_SOURCES = + if BUILD_ATOMIC_ATTRD + attrd_SOURCES += main.c commands.c + else +diff --git a/cib/Makefile.am b/cib/Makefile.am +index 8508223..fcb8ce9 100644 +--- a/cib/Makefile.am ++++ b/cib/Makefile.am +@@ -23,23 +23,23 @@ hadir = $(sysconfdir)/ha.d + halibdir = $(CRM_DAEMON_DIR) + commmoddir = $(halibdir)/modules/comm + +-COMMONLIBS = $(top_builddir)/lib/common/libcrmcommon.la \ +- $(top_builddir)/lib/cib/libcib.la ++COMMONLIBS = $(top_builddir)/lib/common/libcrmcommon.la \ ++ $(top_builddir)/lib/cib/libcib.la + + ## binary progs + halib_PROGRAMS = cib cibmon + + ## SOURCES +-noinst_HEADERS = callbacks.h cibio.h cibmessages.h common.h notify.h ++noinst_HEADERS = callbacks.h cibio.h cibmessages.h common.h notify.h + +-cib_SOURCES = io.c messages.c notify.c \ +- callbacks.c main.c remote.c common.c ++cib_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ ++ $(COMMONLIBS) $(CRYPTOLIB) $(CLUSTERLIBS) + +-cib_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ +- $(COMMONLIBS) $(CRYPTOLIB) $(CLUSTERLIBS) ++cib_SOURCES = io.c messages.c notify.c \ ++ callbacks.c main.c remote.c common.c + +-cibmon_SOURCES 
= cibmon.c +-cibmon_LDADD = $(COMMONLIBS) ++cibmon_LDADD = $(COMMONLIBS) ++cibmon_SOURCES = cibmon.c + + clean-generic: + rm -f *.log *.debug *.xml *~ +diff --git a/crmd/Makefile.am b/crmd/Makefile.am +index c28da0b..979e266 100644 +--- a/crmd/Makefile.am ++++ b/crmd/Makefile.am +@@ -24,20 +24,10 @@ halib_PROGRAMS = crmd + + ## SOURCES + +-noinst_HEADERS = crmd.h crmd_fsa.h crmd_messages.h fsa_defines.h \ +- fsa_matrix.h fsa_proto.h crmd_utils.h crmd_callbacks.h \ ++noinst_HEADERS = crmd.h crmd_fsa.h crmd_messages.h fsa_defines.h \ ++ fsa_matrix.h fsa_proto.h crmd_utils.h crmd_callbacks.h \ + crmd_lrm.h te_callbacks.h tengine.h + +-crmd_SOURCES = main.c crmd.c corosync.c notify.c \ +- fsa.c control.c messages.c membership.c callbacks.c \ +- election.c join_client.c join_dc.c subsystems.c throttle.c \ +- cib.c pengine.c tengine.c lrm.c lrm_state.c remote_lrmd_ra.c \ +- utils.c misc.c te_events.c te_actions.c te_utils.c te_callbacks.c +- +-if BUILD_HEARTBEAT_SUPPORT +-crmd_SOURCES += heartbeat.c +-endif +- + crmd_LDADD = $(top_builddir)/lib/fencing/libstonithd.la \ + $(top_builddir)/lib/transition/libtransitioner.la \ + $(top_builddir)/lib/pengine/libpe_rules.la \ +@@ -45,19 +35,28 @@ crmd_LDADD = $(top_builddir)/lib/fencing/libstonithd.la \ + $(top_builddir)/lib/cluster/libcrmcluster.la \ + $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/lib/services/libcrmservice.la \ +- $(top_builddir)/lib/lrmd/liblrmd.la \ ++ $(top_builddir)/lib/lrmd/liblrmd.la \ + $(CLUSTERLIBS) + ++crmd_SOURCES = main.c crmd.c corosync.c notify.c \ ++ fsa.c control.c messages.c membership.c callbacks.c \ ++ election.c join_client.c join_dc.c subsystems.c throttle.c \ ++ cib.c pengine.c tengine.c lrm.c lrm_state.c remote_lrmd_ra.c \ ++ utils.c misc.c te_events.c te_actions.c te_utils.c te_callbacks.c ++if BUILD_HEARTBEAT_SUPPORT ++crmd_SOURCES += heartbeat.c ++endif ++ + if BUILD_XML_HELP +-man7_MANS = crmd.7 ++man7_MANS = crmd.7 + endif + +-graphs: fsa_inputs.png fsa_inputs_by_action.png fsa_actions_by_state.png ++graphs: fsa_inputs.png fsa_inputs_by_action.png fsa_actions_by_state.png + + %.png: %.dot + dot -Tpng $< > $@ + +-%.dot : fsa_matrix.h make_dot.pl ++%.dot: fsa_matrix.h make_dot.pl + perl $(top_srcdir)/crmd/make_dot.pl $(top_srcdir)/crmd/fsa_matrix.h $(top_builddir)/crmd + + CLEANFILES = $(man7_MANS) +diff --git a/fencing/Makefile.am b/fencing/Makefile.am +index 79fe2ed..1d591fc 100644 +--- a/fencing/Makefile.am ++++ b/fencing/Makefile.am +@@ -24,13 +24,13 @@ test_SCRIPTS = regression.py + halibdir = $(CRM_DAEMON_DIR) + halib_PROGRAMS = stonithd stonith-test + +-sbin_PROGRAMS = stonith_admin +-sbin_SCRIPTS = fence_legacy fence_pcmk ++sbin_PROGRAMS = stonith_admin ++sbin_SCRIPTS = fence_legacy fence_pcmk + +-noinst_HEADERS = internal.h ++noinst_HEADERS = internal.h + + if BUILD_XML_HELP +-man7_MANS = stonithd.7 ++man7_MANS = stonithd.7 + endif + + stonith_test_SOURCES = test.c +@@ -49,23 +49,25 @@ stonith_admin_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/lib/fencing/libstonithd.la \ + $(CRYPTOLIB) $(CLUSTERLIBS) + +-stonithd_CFLAGS = -I$(top_srcdir)/pengine +-stonithd_SOURCES = main.c commands.c remote.c +-if BUILD_STONITH_CONFIG +-BUILT_SOURCES = standalone_config.h +- +-stonithd_SOURCES += standalone_config.c config.y config.l +-stonithd_AM_LFLAGS = -o$(LEX_OUTPUT_ROOT).c +-endif +-stonithd_YFLAGS = -d ++stonithd_CPPFLAGS = -I$(top_srcdir)/pengine $(AM_CPPFLAGS) ++stonithd_YFLAGS = -d + + stonithd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ + 
$(top_builddir)/lib/cluster/libcrmcluster.la \ + $(top_builddir)/lib/fencing/libstonithd.la \ + $(top_builddir)/lib/pengine/libpe_status.la \ +- $(top_builddir)/pengine/libpengine.la \ ++ $(top_builddir)/pengine/libpengine.la \ + $(CRYPTOLIB) $(CLUSTERLIBS) + ++stonithd_SOURCES = main.c commands.c remote.c ++ ++if BUILD_STONITH_CONFIG ++BUILT_SOURCES = standalone_config.h ++ ++stonithd_SOURCES += standalone_config.c config.y config.l ++stonithd_AM_LFLAGS = -o$(LEX_OUTPUT_ROOT).c ++endif ++ + # lex/yacc issues: + CFLAGS = $(CFLAGS_COPY:-Werror=) + +diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am +index 0c57eee..e414a7f 100644 +--- a/lib/cib/Makefile.am ++++ b/lib/cib/Makefile.am +@@ -18,15 +18,16 @@ + include $(top_srcdir)/Makefile.common + + ## libraries +-lib_LTLIBRARIES = libcib.la ++lib_LTLIBRARIES = libcib.la + + ## SOURCES + libcib_la_SOURCES = cib_ops.c cib_utils.c cib_client.c cib_native.c cib_attrs.c +-libcib_la_SOURCES += cib_file.c cib_remote.c ++libcib_la_SOURCES += cib_file.c cib_remote.c + + libcib_la_LDFLAGS = -version-info 5:1:1 +-libcib_la_LIBADD = $(CRYPTOLIB) $(top_builddir)/lib/pengine/libpe_rules.la $(top_builddir)/lib/common/libcrmcommon.la +-libcib_la_CFLAGS = -I$(top_srcdir) ++libcib_la_CPPFLAGS = -I$(top_srcdir) $(AM_CPPFLAGS) ++ ++libcib_la_LIBADD = $(CRYPTOLIB) $(top_builddir)/lib/pengine/libpe_rules.la $(top_builddir)/lib/common/libcrmcommon.la + + clean-generic: + rm -f *.log *.debug *.xml *~ +diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am +index ffa2a73..06d7066 100644 +--- a/lib/cluster/Makefile.am ++++ b/lib/cluster/Makefile.am +@@ -20,10 +20,10 @@ include $(top_srcdir)/Makefile.common + ## libraries + lib_LTLIBRARIES = libcrmcluster.la + +-libcrmcluster_la_SOURCES = election.c cluster.c membership.c + libcrmcluster_la_LDFLAGS = -version-info 6:0:2 + libcrmcluster_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la $(CLUSTERLIBS) + ++libcrmcluster_la_SOURCES = election.c cluster.c membership.c + if BUILD_CS_SUPPORT + libcrmcluster_la_SOURCES += cpg.c + if BUILD_CS_PLUGIN +@@ -32,7 +32,6 @@ else + libcrmcluster_la_SOURCES += corosync.c + endif + endif +- + if BUILD_HEARTBEAT_SUPPORT + libcrmcluster_la_SOURCES += heartbeat.c + #libcrmcluster_la_LIBADD += -ldl +diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am +index 111628f..7550ec1 100644 +--- a/lib/common/Makefile.am ++++ b/lib/common/Makefile.am +@@ -31,16 +31,16 @@ lib_LTLIBRARIES = libcrmcommon.la + + CFLAGS = $(CFLAGS_COPY:-Wcast-qual=) -fPIC + ++libcrmcommon_la_LDFLAGS = -version-info 9:0:6 ++libcrmcommon_la_LIBADD = @LIBADD_DL@ $(GNUTLSLIBS) -lm ++ + libcrmcommon_la_SOURCES = compat.c digest.c ipc.c io.c procfs.c utils.c xml.c \ + iso8601.c remote.c mainloop.c logging.c watchdog.c \ + xpath.c + if BUILD_CIBSECRETS + libcrmcommon_la_SOURCES += cib_secrets.c + endif +- +-libcrmcommon_la_LDFLAGS = -version-info 9:0:6 +-libcrmcommon_la_LIBADD = @LIBADD_DL@ $(GNUTLSLIBS) -lm +-libcrmcommon_la_SOURCES += $(top_builddir)/lib/gnu/md5.c ++libcrmcommon_la_SOURCES += $(top_builddir)/lib/gnu/md5.c + + clean-generic: + rm -f *.log *.debug *.xml *~ +diff --git a/lib/fencing/Makefile.am b/lib/fencing/Makefile.am +index a9f9874..85ae40a 100644 +--- a/lib/fencing/Makefile.am ++++ b/lib/fencing/Makefile.am +@@ -18,8 +18,8 @@ + # + include $(top_srcdir)/Makefile.common + +-lib_LTLIBRARIES = libstonithd.la ++lib_LTLIBRARIES = libstonithd.la + +-libstonithd_la_SOURCES = st_client.c +-libstonithd_la_LDFLAGS = -version-info 4:1:2 
+-libstonithd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la ++libstonithd_la_LDFLAGS = -version-info 4:1:2 ++libstonithd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la ++libstonithd_la_SOURCES = st_client.c +diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am +index c23fef5..25f3d55 100644 +--- a/lib/lrmd/Makefile.am ++++ b/lib/lrmd/Makefile.am +@@ -16,10 +16,10 @@ + # + include $(top_srcdir)/Makefile.common + +-lib_LTLIBRARIES = liblrmd.la ++lib_LTLIBRARIES = liblrmd.la + +-liblrmd_la_SOURCES = lrmd_client.c proxy_common.c +-liblrmd_la_LDFLAGS = -version-info 4:0:3 +-liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \ +- $(top_builddir)/lib/services/libcrmservice.la \ ++liblrmd_la_LDFLAGS = -version-info 4:0:3 ++liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \ ++ $(top_builddir)/lib/services/libcrmservice.la \ + $(top_builddir)/lib/fencing/libstonithd.la ++liblrmd_la_SOURCES = lrmd_client.c proxy_common.c +diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am +index a0d19e5..de760c3 100644 +--- a/lib/pengine/Makefile.am ++++ b/lib/pengine/Makefile.am +@@ -18,19 +18,19 @@ + include $(top_srcdir)/Makefile.common + + ## libraries +-lib_LTLIBRARIES = libpe_rules.la libpe_status.la ++lib_LTLIBRARIES = libpe_rules.la libpe_status.la + + ## SOURCES +-noinst_HEADERS = unpack.h variant.h ++noinst_HEADERS = unpack.h variant.h + + libpe_rules_la_LDFLAGS = -version-info 2:6:0 +-libpe_rules_la_SOURCES = rules.c common.c + libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la ++libpe_rules_la_SOURCES = rules.c common.c + + libpe_status_la_LDFLAGS = -version-info 11:0:1 +-libpe_status_la_SOURCES = status.c unpack.c utils.c complex.c native.c \ +- group.c clone.c rules.c common.c remote.c +-libpe_status_la_LIBADD = @CURSESLIBS@ $(top_builddir)/lib/common/libcrmcommon.la ++libpe_status_la_LIBADD = @CURSESLIBS@ $(top_builddir)/lib/common/libcrmcommon.la ++libpe_status_la_SOURCES = status.c unpack.c utils.c complex.c native.c \ ++ group.c clone.c rules.c common.c remote.c + + clean-generic: + rm -f *.log *.debug *~ +diff --git a/lib/services/Makefile.am b/lib/services/Makefile.am +index 2797b53..c789fbd 100644 +--- a/lib/services/Makefile.am ++++ b/lib/services/Makefile.am +@@ -18,24 +18,24 @@ + + MAINTAINERCLEANFILES = Makefile.in + +-AM_CPPFLAGS = -I$(top_builddir)/include ++AM_CPPFLAGS = -I$(top_builddir)/include + +-lib_LTLIBRARIES = libcrmservice.la +-noinst_HEADERS = upstart.h systemd.h services_private.h ++lib_LTLIBRARIES = libcrmservice.la ++noinst_HEADERS = upstart.h systemd.h services_private.h + +-libcrmservice_la_SOURCES = services.c services_linux.c +-libcrmservice_la_LDFLAGS = -version-info 4:1:1 +-libcrmservice_la_CFLAGS = $(GIO_CFLAGS) -DOCF_ROOT_DIR=\"@OCF_ROOT_DIR@\" +-libcrmservice_la_LIBADD = $(GIO_LIBS) $(top_builddir)/lib/common/libcrmcommon.la $(DBUS_LIBS) ++libcrmservice_la_LDFLAGS = -version-info 4:1:1 ++libcrmservice_la_CPPFLAGS = -DOCF_ROOT_DIR=\"@OCF_ROOT_DIR@\" $(AM_CPPFLAGS) ++libcrmservice_la_CFLAGS = $(GIO_CFLAGS) + ++libcrmservice_la_LIBADD = $(GIO_LIBS) $(top_builddir)/lib/common/libcrmcommon.la $(DBUS_LIBS) ++ ++libcrmservice_la_SOURCES = services.c services_linux.c + if BUILD_DBUS +-libcrmservice_la_SOURCES += dbus.c ++libcrmservice_la_SOURCES += dbus.c + endif +- + if BUILD_UPSTART +-libcrmservice_la_SOURCES += upstart.c ++libcrmservice_la_SOURCES += upstart.c + endif +- + if BUILD_SYSTEMD +-libcrmservice_la_SOURCES += systemd.c ++libcrmservice_la_SOURCES += systemd.c + endif +diff --git 
a/lib/transition/Makefile.am b/lib/transition/Makefile.am +index 7bcfc1a..9bc039e 100644 +--- a/lib/transition/Makefile.am ++++ b/lib/transition/Makefile.am +@@ -18,14 +18,15 @@ + include $(top_srcdir)/Makefile.common + + ## libraries +-lib_LTLIBRARIES = libtransitioner.la ++lib_LTLIBRARIES = libtransitioner.la + + ## SOURCES +-libtransitioner_la_SOURCES = unpack.c graph.c utils.c + + libtransitioner_la_LDFLAGS = -version-info 2:5:0 +-libtransitioner_la_CFLAGS = -I$(top_builddir) +-libtransitioner_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la ++libtransitioner_la_CPPFLAGS = -I$(top_builddir) $(AM_CPPFLAGS) ++ ++libtransitioner_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la ++libtransitioner_la_SOURCES = unpack.c graph.c utils.c + + clean-generic: + rm -f *~ +diff --git a/lrmd/Makefile.am b/lrmd/Makefile.am +index 556d48a..64df105 100644 +--- a/lrmd/Makefile.am ++++ b/lrmd/Makefile.am +@@ -19,45 +19,43 @@ include $(top_srcdir)/Makefile.common + testdir = $(datadir)/$(PACKAGE)/tests/lrmd + test_SCRIPTS = regression.py + +-lrmdlibdir = $(CRM_DAEMON_DIR) +-lrmdlib_PROGRAMS = lrmd lrmd_test lrmd_internal_ctl ++lrmdlibdir = $(CRM_DAEMON_DIR) ++lrmdlib_PROGRAMS = lrmd lrmd_test lrmd_internal_ctl + +-initdir = $(INITDIR) +-init_SCRIPTS = pacemaker_remote +-sbin_PROGRAMS = pacemaker_remoted ++initdir = $(INITDIR) ++init_SCRIPTS = pacemaker_remote ++sbin_PROGRAMS = pacemaker_remoted + + if BUILD_SYSTEMD +-systemdunit_DATA = pacemaker_remote.service ++systemdunit_DATA = pacemaker_remote.service + endif + +-lrmd_SOURCES = main.c lrmd.c + lrmd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ +- $(top_builddir)/lib/services/libcrmservice.la \ +- $(top_builddir)/lib/lrmd/liblrmd.la \ ++ $(top_builddir)/lib/services/libcrmservice.la \ ++ $(top_builddir)/lib/lrmd/liblrmd.la \ + $(top_builddir)/lib/fencing/libstonithd.la ${COMPAT_LIBS} ++lrmd_SOURCES = main.c lrmd.c + ++pacemaker_remoted_CPPFLAGS = -DSUPPORT_REMOTE $(AM_CPPFLAGS) + +-pacemaker_remoted_SOURCES = main.c lrmd.c tls_backend.c ipc_proxy.c +-pacemaker_remoted_CFLAGS = -DSUPPORT_REMOTE + pacemaker_remoted_LDADD = $(lrmd_LDADD) ++pacemaker_remoted_SOURCES = main.c lrmd.c tls_backend.c ipc_proxy.c + +- ++lrmd_internal_ctl_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ ++ $(top_builddir)/lib/lrmd/liblrmd.la \ ++ $(top_builddir)/lib/cib/libcib.la \ ++ $(top_builddir)/lib/services/libcrmservice.la \ ++ $(top_builddir)/lib/pengine/libpe_status.la \ ++ $(top_builddir)/pengine/libpengine.la + lrmd_internal_ctl_SOURCES = remote_ctl.c +-lrmd_internal_ctl_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ +- $(top_builddir)/lib/lrmd/liblrmd.la \ +- $(top_builddir)/lib/cib/libcib.la \ +- $(top_builddir)/lib/services/libcrmservice.la \ +- $(top_builddir)/lib/pengine/libpe_status.la \ +- $(top_builddir)/pengine/libpengine.la + +- +-lrmd_test_SOURCES = test.c + lrmd_test_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ +- $(top_builddir)/lib/lrmd/liblrmd.la \ +- $(top_builddir)/lib/cib/libcib.la \ +- $(top_builddir)/lib/services/libcrmservice.la \ +- $(top_builddir)/lib/pengine/libpe_status.la \ ++ $(top_builddir)/lib/lrmd/liblrmd.la \ ++ $(top_builddir)/lib/cib/libcib.la \ ++ $(top_builddir)/lib/services/libcrmservice.la \ ++ $(top_builddir)/lib/pengine/libpe_status.la \ + $(top_builddir)/pengine/libpengine.la ++lrmd_test_SOURCES = test.c + + noinst_HEADERS = lrmd_private.h + +diff --git a/mcp/Makefile.am b/mcp/Makefile.am +index 1b3720a..195530a 100644 +--- a/mcp/Makefile.am ++++ b/mcp/Makefile.am +@@ -24,16 +24,16 @@ 
init_SCRIPTS = pacemaker + sbin_PROGRAMS = pacemakerd + + if BUILD_SYSTEMD +-systemdunit_DATA = pacemaker.service ++systemdunit_DATA = pacemaker.service + endif + + ## SOURCES + + noinst_HEADERS = pacemaker.h + +-pacemakerd_SOURCES = pacemaker.c corosync.c + pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la $(top_builddir)/lib/common/libcrmcommon.la + pacemakerd_LDADD += $(CLUSTERLIBS) ++pacemakerd_SOURCES = pacemaker.c corosync.c + + endif + +diff --git a/pengine/Makefile.am b/pengine/Makefile.am +index 170b728..96c914f 100644 +--- a/pengine/Makefile.am ++++ b/pengine/Makefile.am +@@ -17,7 +17,7 @@ + # + include $(top_srcdir)/Makefile.common + +-AM_CPPFLAGS += -I$(top_builddir) -I$(top_srcdir) ++AM_CPPFLAGS += -I$(top_builddir) -I$(top_srcdir) + + halibdir = $(CRM_DAEMON_DIR) + +@@ -30,9 +30,8 @@ test_DATA = regression.core.sh + test10dir = $(datadir)/$(PACKAGE)/tests/pengine/test10 + test10_DATA = $(PE_TESTS) $(PE_TESTS:%.scores=%.xml) $(PE_TESTS:%.scores=%.exp) $(PE_TESTS:%.scores=%.dot) $(PE_TESTS:%.scores=%.summary) $(wildcard test10/*.stderr) + +-COMMONLIBS = \ +- $(top_builddir)/lib/common/libcrmcommon.la \ +- $(top_builddir)/lib/pengine/libpe_status.la \ ++COMMONLIBS = $(top_builddir)/lib/common/libcrmcommon.la \ ++ $(top_builddir)/lib/pengine/libpe_status.la \ + libpengine.la $(CURSESLIBS) $(CLUSTERLIBS) + + beekhof: +@@ -62,17 +61,16 @@ endif + noinst_HEADERS = allocate.h utils.h pengine.h + + libpengine_la_LDFLAGS = -version-info 11:0:1 ++libpengine_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la \ ++ $(top_builddir)/lib/cib/libcib.la + # -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version + libpengine_la_SOURCES = pengine.c allocate.c utils.c constraints.c +-libpengine_la_SOURCES += native.c group.c clone.c master.c graph.c utilization.c ++libpengine_la_SOURCES += native.c group.c clone.c master.c graph.c utilization.c + +-libpengine_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la \ +- $(top_builddir)/lib/cib/libcib.la +- +-pengine_SOURCES = main.c + pengine_LDADD = $(top_builddir)/lib/cib/libcib.la $(COMMONLIBS) + # libcib for get_object_root() + # $(top_builddir)/lib/hbclient/libhbclient.la ++pengine_SOURCES = main.c + + install-exec-local: + $(mkinstalldirs) $(DESTDIR)/$(PE_STATE_DIR) +-- +1.8.3.1 + diff --git a/SOURCES/0020-Fix-pacemakerd-Do-not-forget-about-nodes-that-leave-.patch b/SOURCES/0020-Fix-pacemakerd-Do-not-forget-about-nodes-that-leave-.patch deleted file mode 100644 index e2da8a5..0000000 --- a/SOURCES/0020-Fix-pacemakerd-Do-not-forget-about-nodes-that-leave-.patch +++ /dev/null @@ -1,23 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 3 Sep 2015 13:27:57 +1000 -Subject: [PATCH] Fix: pacemakerd: Do not forget about nodes that leave the - cluster - -(cherry picked from commit 2ac396ae6f54c9437bcf786eeccf94d4e2fdd77a) ---- - mcp/pacemaker.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c -index 9c3195e..88a6a1f 100644 ---- a/mcp/pacemaker.c -+++ b/mcp/pacemaker.c -@@ -1108,6 +1108,8 @@ main(int argc, char **argv) - cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver; - cluster.cpg.cpg_confchg_fn = mcp_cpg_membership; - -+ crm_set_autoreap(FALSE); -+ - if(cluster_connect_cpg(&cluster) == FALSE) { - crm_err("Couldn't connect to Corosync's CPG service"); - rc = -ENOPROTOOPT; diff --git a/SOURCES/0021-Fix-pacemakerd-Track-node-state-in-pacemakerd.patch b/SOURCES/0021-Fix-pacemakerd-Track-node-state-in-pacemakerd.patch deleted file mode 100644 index b2814a8..0000000 --- 
a/SOURCES/0021-Fix-pacemakerd-Track-node-state-in-pacemakerd.patch +++ /dev/null @@ -1,58 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 3 Sep 2015 14:29:27 +1000 -Subject: [PATCH] Fix: pacemakerd: Track node state in pacemakerd - -(cherry picked from commit c186f54241c49bf20b1620767933b006063d613c) ---- - mcp/pacemaker.c | 22 +++++++++++++++++++++- - 1 file changed, 21 insertions(+), 1 deletion(-) - -diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c -index 88a6a1f..9f00a21 100644 ---- a/mcp/pacemaker.c -+++ b/mcp/pacemaker.c -@@ -901,7 +901,6 @@ mcp_cpg_membership(cpg_handle_t handle, - static gboolean - mcp_quorum_callback(unsigned long long seq, gboolean quorate) - { -- /* Nothing to do */ - pcmk_quorate = quorate; - return TRUE; - } -@@ -909,8 +908,23 @@ mcp_quorum_callback(unsigned long long seq, gboolean quorate) - static void - mcp_quorum_destroy(gpointer user_data) - { -+ crm_info("connection lost"); -+} -+ -+#if SUPPORT_CMAN -+static gboolean -+mcp_cman_dispatch(unsigned long long seq, gboolean quorate) -+{ -+ pcmk_quorate = quorate; -+ return TRUE; -+} -+ -+static void -+mcp_cman_destroy(gpointer user_data) -+{ - crm_info("connection closed"); - } -+#endif - - int - main(int argc, char **argv) -@@ -1122,6 +1136,12 @@ main(int argc, char **argv) - } - } - -+#if SUPPORT_CMAN -+ if (rc == pcmk_ok && is_cman_cluster()) { -+ init_cman_connection(mcp_cman_dispatch, mcp_cman_destroy); -+ } -+#endif -+ - if(rc == pcmk_ok) { - local_name = get_local_node_name(); - update_node_processes(local_nodeid, local_name, get_process_list()); diff --git a/SOURCES/0022-Fix-PE-Resolve-memory-leak.patch b/SOURCES/0022-Fix-PE-Resolve-memory-leak.patch deleted file mode 100644 index e7cd5b1..0000000 --- a/SOURCES/0022-Fix-PE-Resolve-memory-leak.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Andrew Beekhof -Date: Tue, 8 Sep 2015 12:02:54 +1000 -Subject: [PATCH] Fix: PE: Resolve memory leak - -(cherry picked from commit 4f48a79fd19be0e614716f0900e31985d4714ace) ---- - lib/pengine/unpack.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 156a192..c4f3134 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -276,9 +276,13 @@ destroy_digest_cache(gpointer ptr) - op_digest_cache_t *data = ptr; - - free_xml(data->params_all); -+ free_xml(data->params_secure); - free_xml(data->params_restart); -+ - free(data->digest_all_calc); - free(data->digest_restart_calc); -+ free(data->digest_secure_calc); -+ - free(data); - } - diff --git a/SOURCES/0023-Fix-cman-Purge-all-node-caches-for-crm_node-R.patch b/SOURCES/0023-Fix-cman-Purge-all-node-caches-for-crm_node-R.patch deleted file mode 100644 index 5ff7c08..0000000 --- a/SOURCES/0023-Fix-cman-Purge-all-node-caches-for-crm_node-R.patch +++ /dev/null @@ -1,24 +0,0 @@ -From: Andrew Beekhof -Date: Tue, 8 Sep 2015 12:03:56 +1000 -Subject: [PATCH] Fix: cman: Purge all node caches for crm_node -R - -(cherry picked from commit c445e135b6d52b1a5f3cfdacfa54a63b313c00d2) ---- - tools/crm_node.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - -diff --git a/tools/crm_node.c b/tools/crm_node.c -index 9626120..48ee7c4 100644 ---- a/tools/crm_node.c -+++ b/tools/crm_node.c -@@ -607,9 +607,7 @@ try_cman(int command, enum cluster_type_e stack) - - switch (command) { - case 'R': -- if (tools_remove_node_cache(target_uname, CRM_SYSTEM_CRMD)) { -- crm_err("Failed to connect to "CRM_SYSTEM_CRMD" to remove node '%s'", target_uname); -- } -+ try_pacemaker(command, stack); - break; - - case 'e': diff --git 
a/SOURCES/0024-Refactor-membership-Safely-autoreap-nodes-without-co.patch b/SOURCES/0024-Refactor-membership-Safely-autoreap-nodes-without-co.patch deleted file mode 100644 index 35617cc..0000000 --- a/SOURCES/0024-Refactor-membership-Safely-autoreap-nodes-without-co.patch +++ /dev/null @@ -1,92 +0,0 @@ -From: Andrew Beekhof -Date: Tue, 8 Sep 2015 12:05:04 +1000 -Subject: [PATCH] Refactor: membership: Safely autoreap nodes without code - duplication - -(cherry picked from commit acd660a1bdf40ada599041cb14d2128632d2e7a5) ---- - lib/cluster/membership.c | 43 +++++++++++++++++++++---------------------- - 1 file changed, 21 insertions(+), 22 deletions(-) - -diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c -index b7958eb..3081e54 100644 ---- a/lib/cluster/membership.c -+++ b/lib/cluster/membership.c -@@ -795,8 +795,8 @@ crm_update_peer_expected(const char *source, crm_node_t * node, const char *expe - * called within a cache iteration if reaping is possible, - * otherwise reaping could invalidate the iterator. - */ --crm_node_t * --crm_update_peer_state(const char *source, crm_node_t * node, const char *state, int membership) -+static crm_node_t * -+crm_update_peer_state_iter(const char *source, crm_node_t * node, const char *state, int membership, GHashTableIter *iter) - { - gboolean is_member; - -@@ -822,13 +822,19 @@ crm_update_peer_state(const char *source, crm_node_t * node, const char *state, - free(last); - - if (!is_member && crm_autoreap) { -- if (status_type == crm_status_rstate) { -+ if(iter) { -+ crm_notice("Purged 1 peer with id=%u and/or uname=%s from the membership cache", node->id, node->uname); -+ g_hash_table_iter_remove(iter); -+ -+ } else if (status_type == crm_status_rstate) { - crm_remote_peer_cache_remove(node->uname); -+ - } else { - reap_crm_member(node->id, node->uname); - } - node = NULL; - } -+ - } else { - crm_trace("%s: Node %s[%u] - state is unchanged (%s)", source, node->uname, node->id, - state); -@@ -836,6 +842,12 @@ crm_update_peer_state(const char *source, crm_node_t * node, const char *state, - return node; - } - -+crm_node_t * -+crm_update_peer_state(const char *source, crm_node_t * node, const char *state, int membership) -+{ -+ return crm_update_peer_state_iter(source, node, state, membership, NULL); -+} -+ - /*! 
- * \internal - * \brief Reap all nodes from cache whose membership information does not match -@@ -853,26 +865,13 @@ crm_reap_unseen_nodes(uint64_t membership) - while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&node)) { - if (node->last_seen != membership) { - if (node->state) { -- /* crm_update_peer_state() cannot be called here, because that -- * might modify the peer cache, invalidating our iterator -+ /* -+ * Calling crm_update_peer_state_iter() allows us to -+ * remove the node from crm_peer_cache without -+ * invalidating our iterator - */ -- if (safe_str_eq(node->state, CRM_NODE_LOST)) { -- crm_trace("Node %s[%u] - state is unchanged (%s)", -- node->uname, node->id, CRM_NODE_LOST); -- } else { -- char *last = node->state; -- -- node->state = strdup(CRM_NODE_LOST); -- crm_notice("Node %s[%u] - state is now %s (was %s)", -- node->uname, node->id, CRM_NODE_LOST, last); -- if (crm_status_callback) { -- crm_status_callback(crm_status_nstate, node, last); -- } -- if (crm_autoreap) { -- g_hash_table_iter_remove(&iter); -- } -- free(last); -- } -+ crm_update_peer_state_iter(__FUNCTION__, node, CRM_NODE_LOST, membership, &iter); -+ - } else { - crm_info("State of node %s[%u] is still unknown", - node->uname, node->id); diff --git a/SOURCES/0025-Fix-crmd-Prevent-segfault-by-correctly-detecting-whe.patch b/SOURCES/0025-Fix-crmd-Prevent-segfault-by-correctly-detecting-whe.patch deleted file mode 100644 index a1797e9..0000000 --- a/SOURCES/0025-Fix-crmd-Prevent-segfault-by-correctly-detecting-whe.patch +++ /dev/null @@ -1,23 +0,0 @@ -From: Andrew Beekhof -Date: Wed, 9 Sep 2015 14:46:49 +1000 -Subject: [PATCH] Fix: crmd: Prevent segfault by correctly detecting when - notifications are not required - -(cherry picked from commit 5eb9f93ef666c75e5f32827a92b0a57ada063803) ---- - crmd/notify.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/crmd/notify.c b/crmd/notify.c -index ca2be0f..179af18 100644 ---- a/crmd/notify.c -+++ b/crmd/notify.c -@@ -141,7 +141,7 @@ crmd_notify_fencing_op(stonith_event_t * e) - { - char *desc = NULL; - -- if(notify_script) { -+ if(notify_script == NULL) { - return; - } - diff --git a/SOURCES/0026-Fix-crmd-don-t-add-node-ID-to-proxied-remote-node-re.patch b/SOURCES/0026-Fix-crmd-don-t-add-node-ID-to-proxied-remote-node-re.patch deleted file mode 100644 index ba29678..0000000 --- a/SOURCES/0026-Fix-crmd-don-t-add-node-ID-to-proxied-remote-node-re.patch +++ /dev/null @@ -1,29 +0,0 @@ -From: Ken Gaillot -Date: Thu, 27 Aug 2015 11:00:02 -0500 -Subject: [PATCH] Fix: crmd: don't add node ID to proxied remote node requests - for attrd - -446a1005 incorrectly set F_ATTRD_HOST_ID for proxied remote node requests to -attrd. Since attrd only uses F_ATTRD_HOST_ID to associate a cluster node name -with an ID, it doesn't ever need to be set for remote nodes. - -Additionally, that revision used the proxying cluster node's node ID, which can -lead to node ID conflicts in attrd. 
- -(cherry picked from commit 6af6da534646dbadf3d8d1d63d0edb2844c72073) ---- - crmd/lrm_state.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index c03fa0b..bea1027 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -540,7 +540,6 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - if (safe_str_eq(type, T_ATTRD) - && crm_element_value(request, F_ATTRD_HOST) == NULL) { - crm_xml_add(request, F_ATTRD_HOST, proxy->node_name); -- crm_xml_add_int(request, F_ATTRD_HOST_ID, get_local_nodeid(0)); - } - - rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL); diff --git a/SOURCES/0027-Fix-pacemaker_remote-memory-leak-in-ipc_proxy_dispat.patch b/SOURCES/0027-Fix-pacemaker_remote-memory-leak-in-ipc_proxy_dispat.patch deleted file mode 100644 index 9dad48e..0000000 --- a/SOURCES/0027-Fix-pacemaker_remote-memory-leak-in-ipc_proxy_dispat.patch +++ /dev/null @@ -1,35 +0,0 @@ -From: Ken Gaillot -Date: Mon, 14 Sep 2015 15:00:13 -0500 -Subject: [PATCH] Fix: pacemaker_remote: memory leak in ipc_proxy_dispatch() - -Detected via routine valgrind testing - -(cherry picked from commit 3bb439d1554cb5567b886c52107bd3bb6f27b696) ---- - lrmd/ipc_proxy.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c -index 9427393..2a5ad78 100644 ---- a/lrmd/ipc_proxy.c -+++ b/lrmd/ipc_proxy.c -@@ -223,9 +223,9 @@ ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) - } - - CRM_CHECK(client != NULL, crm_err("Invalid client"); -- return FALSE); -+ free_xml(request); return FALSE); - CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p", client); -- return FALSE); -+ free_xml(request); return FALSE); - - /* this ensures that synced request/responses happen over the event channel - * in the crmd, allowing the crmd to process the messages async */ -@@ -241,6 +241,7 @@ ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) - crm_xml_add_int(msg, F_LRMD_IPC_MSG_FLAGS, flags); - add_message_xml(msg, F_LRMD_IPC_MSG, request); - lrmd_server_send_notify(ipc_proxy, msg); -+ free_xml(request); - free_xml(msg); - - return 0; diff --git a/SOURCES/0028-Log-The-package-version-is-more-informative.patch b/SOURCES/0028-Log-The-package-version-is-more-informative.patch deleted file mode 100644 index 543d9ab..0000000 --- a/SOURCES/0028-Log-The-package-version-is-more-informative.patch +++ /dev/null @@ -1,115 +0,0 @@ -From: Andrew Beekhof -Date: Wed, 16 Sep 2015 09:14:39 +1000 -Subject: [PATCH] Log: The package version is more informative - -(cherry picked from commit 2b4d195e9e94777fc1953832fcce3637ffa2f449) ---- - crmd/cib.c | 2 +- - crmd/election.c | 2 +- - crmd/main.c | 5 ++--- - lib/ais/plugin.c | 2 +- - lib/common/utils.c | 4 ++-- - mcp/pacemaker.c | 4 ++-- - 6 files changed, 9 insertions(+), 10 deletions(-) - -diff --git a/crmd/cib.c b/crmd/cib.c -index 7ec5eda..41e9efb 100644 ---- a/crmd/cib.c -+++ b/crmd/cib.c -@@ -113,7 +113,7 @@ revision_check_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, vo - cmp = compare_version(revision, CRM_FEATURE_SET); - - if (cmp > 0) { -- crm_err("This build (%s) does not support the current resource configuration", VERSION); -+ crm_err("This build (%s) does not support the current resource configuration", PACEMAKER_VERSION); - crm_err("We can only support up to CRM feature set %s (current=%s)", - CRM_FEATURE_SET, revision); - crm_err("Shutting down the CRM"); -diff --git a/crmd/election.c b/crmd/election.c -index b542a66..adab4e3 
100644 ---- a/crmd/election.c -+++ b/crmd/election.c -@@ -215,7 +215,7 @@ do_dc_takeover(long long action, - } - - update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, -- "dc-version", VERSION "-" BUILD_VERSION, FALSE, NULL, NULL); -+ "dc-version", PACEMAKER_VERSION "-" BUILD_VERSION, FALSE, NULL, NULL); - - update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL, - "cluster-infrastructure", cluster_type, FALSE, NULL, NULL); -diff --git a/crmd/main.c b/crmd/main.c -index e9a69b4..75ed91c 100644 ---- a/crmd/main.c -+++ b/crmd/main.c -@@ -89,13 +89,12 @@ main(int argc, char **argv) - crmd_metadata(); - return 0; - } else if (argc - optind == 1 && safe_str_eq("version", argv[optind])) { -- fprintf(stdout, "CRM Version: "); -- fprintf(stdout, "%s (%s)\n", VERSION, BUILD_VERSION); -+ fprintf(stdout, "CRM Version: %s (%s)\n", PACEMAKER_VERSION, BUILD_VERSION); - return 0; - } - - crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE); -- crm_notice("CRM Git Version: %s\n", BUILD_VERSION); -+ crm_notice("CRM Git Version: %s (%s)\n", PACEMAKER_VERSION, BUILD_VERSION); - - if (optind > argc) { - ++argerr; -diff --git a/lib/ais/plugin.c b/lib/ais/plugin.c -index ab534fa..cf2a131 100644 ---- a/lib/ais/plugin.c -+++ b/lib/ais/plugin.c -@@ -201,7 +201,7 @@ static struct corosync_exec_handler pcmk_exec_service[] = { - */ - /* *INDENT-OFF* */ - struct corosync_service_engine pcmk_service_handler = { -- .name = (char *)"Pacemaker Cluster Manager "PACKAGE_VERSION, -+ .name = (char *)"Pacemaker Cluster Manager "PACEMAKER_VERSION, - .id = PCMK_SERVICE_ID, - .private_data_size = 0, - .flow_control = COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED, -diff --git a/lib/common/utils.c b/lib/common/utils.c -index 628cf2f..2364f5c 100644 ---- a/lib/common/utils.c -+++ b/lib/common/utils.c -@@ -1603,13 +1603,13 @@ crm_help(char cmd, int exit_code) - FILE *stream = (exit_code ? 
stderr : stdout); - - if (cmd == 'v' || cmd == '$') { -- fprintf(stream, "Pacemaker %s\n", VERSION); -+ fprintf(stream, "Pacemaker %s\n", PACEMAKER_VERSION); - fprintf(stream, "Written by Andrew Beekhof\n"); - goto out; - } - - if (cmd == '!') { -- fprintf(stream, "Pacemaker %s (Build: %s): %s\n", VERSION, BUILD_VERSION, CRM_FEATURES); -+ fprintf(stream, "Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); - goto out; - } - -diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c -index 9f00a21..910d154 100644 ---- a/mcp/pacemaker.c -+++ b/mcp/pacemaker.c -@@ -972,7 +972,7 @@ main(int argc, char **argv) - shutdown = TRUE; - break; - case 'F': -- printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", VERSION, BUILD_VERSION, -+ printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", PACEMAKER_VERSION, BUILD_VERSION, - CRM_FEATURE_SET, CRM_FEATURES); - crm_exit(pcmk_ok); - default: -@@ -1039,7 +1039,7 @@ main(int argc, char **argv) - crm_exit(ENODATA); - } - -- crm_notice("Starting Pacemaker %s (Build: %s): %s", VERSION, BUILD_VERSION, CRM_FEATURES); -+ crm_notice("Starting Pacemaker %s (Build: %s): %s", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES); - mainloop = g_main_new(FALSE); - sysrq_init(); - diff --git a/SOURCES/0029-Fix-crm_resource-Allow-the-resource-configuration-to.patch b/SOURCES/0029-Fix-crm_resource-Allow-the-resource-configuration-to.patch deleted file mode 100644 index 942b464..0000000 --- a/SOURCES/0029-Fix-crm_resource-Allow-the-resource-configuration-to.patch +++ /dev/null @@ -1,127 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 17 Sep 2015 09:46:38 +1000 -Subject: [PATCH] Fix: crm_resource: Allow the resource configuration to be - modified for --force-{check,start,..} calls - -(cherry picked from commit 1206f735a8ddb33c77152c736828e823e7755c34) ---- - tools/crm_resource.c | 36 +++++++++++++++++++++++++++++++----- - tools/crm_resource.h | 2 +- - tools/crm_resource_runtime.c | 14 +++++++++++++- - 3 files changed, 45 insertions(+), 7 deletions(-) - -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 156bbea..2a94362 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -247,6 +247,7 @@ main(int argc, char **argv) - const char *prop_set = NULL; - const char *rsc_long_cmd = NULL; - const char *longname = NULL; -+ GHashTable *override_params = NULL; - - char *xml_file = NULL; - crm_ipc_t *crmd_channel = NULL; -@@ -503,11 +504,35 @@ main(int argc, char **argv) - } - } - -- if (optind < argc && argv[optind] != NULL) { -+ if (optind < argc -+ && argv[optind] != NULL -+ && rsc_cmd == 0 -+ && rsc_long_cmd) { -+ -+ override_params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); -+ while (optind < argc && argv[optind] != NULL) { -+ char *name = calloc(1, strlen(argv[optind])); -+ char *value = calloc(1, strlen(argv[optind])); -+ int rc = sscanf(argv[optind], "%[^=]=%s", name, value); -+ -+ if(rc == 2) { -+ g_hash_table_replace(override_params, name, value); -+ -+ } else { -+ CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd); -+ free(value); -+ free(name); -+ argerr++; -+ } -+ optind++; -+ } -+ -+ } else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) { - CMD_ERR("non-option ARGV-elements: "); - while (optind < argc && argv[optind] != NULL) { -- CMD_ERR("%s ", argv[optind++]); -- ++argerr; -+ CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]); -+ optind++; -+ argerr++; - } - } - -@@ -516,7 +541,8 @@ main(int argc, char **argv) - } - - if 
(argerr) { -- crm_help('?', EX_USAGE); -+ CMD_ERR("Invalid option(s) supplied, use --help for valid usage"); -+ return crm_exit(EX_USAGE); - } - - our_pid = calloc(1, 11); -@@ -631,7 +657,7 @@ main(int argc, char **argv) - rc = wait_till_stable(timeout_ms, cib_conn); - - } else if (rsc_cmd == 0 && rsc_long_cmd) { /* force-(stop|start|check) */ -- rc = cli_resource_execute(rsc_id, rsc_long_cmd, cib_conn, &data_set); -+ rc = cli_resource_execute(rsc_id, rsc_long_cmd, override_params, cib_conn, &data_set); - - } else if (rsc_cmd == 'A' || rsc_cmd == 'a') { - GListPtr lpc = NULL; -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 5a206e0..d4c3b05 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -74,7 +74,7 @@ int cli_resource_search(const char *rsc, pe_working_set_t * data_set); - int cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_uname, resource_t * rsc, pe_working_set_t * data_set); - int cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib); - int cli_resource_move(const char *rsc_id, const char *host_name, cib_t * cib, pe_working_set_t *data_set); --int cli_resource_execute(const char *rsc_id, const char *rsc_action, cib_t * cib, pe_working_set_t *data_set); -+int cli_resource_execute(const char *rsc_id, const char *rsc_action, GHashTable *override_hash, cib_t * cib, pe_working_set_t *data_set); - - int cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const char *attr_id, - const char *attr_name, const char *attr_value, bool recursive, -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index b9427bc..ce9db01 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1297,7 +1297,7 @@ wait_till_stable(int timeout_ms, cib_t * cib) - } - - int --cli_resource_execute(const char *rsc_id, const char *rsc_action, cib_t * cib, pe_working_set_t *data_set) -+cli_resource_execute(const char *rsc_id, const char *rsc_action, GHashTable *override_hash, cib_t * cib, pe_working_set_t *data_set) - { - int rc = pcmk_ok; - svc_action_t *op = NULL; -@@ -1360,6 +1360,18 @@ cli_resource_execute(const char *rsc_id, const char *rsc_action, cib_t * cib, pe - setenv("OCF_TRACE_RA", "1", 1); - } - -+ if(op && override_hash) { -+ GHashTableIter iter; -+ char *name = NULL; -+ char *value = NULL; -+ -+ g_hash_table_iter_init(&iter, override_hash); -+ while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) { -+ printf("Overriding the cluser configuration for '%s' with '%s' = '%s'\n", rsc->id, name, value); -+ g_hash_table_replace(op->params, strdup(name), strdup(value)); -+ } -+ } -+ - if(op == NULL) { - /* Re-run but with stderr enabled so we can display a sane error message */ - crm_enable_stderr(TRUE); diff --git a/SOURCES/003-harden-toolchain.patch b/SOURCES/003-harden-toolchain.patch new file mode 100644 index 0000000..5f5153a --- /dev/null +++ b/SOURCES/003-harden-toolchain.patch @@ -0,0 +1,603 @@ +From 658fff9445711b8402029bc2916fccbc5d6fd8fc Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Tue, 21 Jun 2016 19:16:43 +0200 +Subject: [PATCH 1/2] Feature: conditional hardening, especially for daemons + + libraries + +So far the build system has not been concerned with run-time hardening +measures the typical toolchains provide (beside unconditional enforcing +of -fstack-protector-all). 
Hence make a step in that direction, +enabling following if available and anticipating more to come: + +[$LD -z relro] +- daemons incl. libs +- make some parts of Global Offset Table (GOT) read-only + +[$CC -fPIE + ld -pie] +- daemons +- benefit from Address Space Layout Randomization (ASLR) for code + areas + +[$LD -z now] +- daemons incl. libs, only when the former two features are supported +- all symbols are resolved initially to that complete GOT is read-only + +[$CC -fstack-protector-strong/-fstack-protector-all/-fstack-protector] +- universal +- extra run-time checks for buffer overflows +- NOTE: + in case -fstack-protector-strong is supported, this is effectively + a weakening of previously enforced -fstack-protector-all, but note + that this variant comes with not entirely negligible performance + penalty [1], making "strong" variant a reasonable tradeoff for + something that is not in the prime line of possible attacks + +For details on how to instruct configure script to do the right +thing (for when the default won't cut it), see detailed comment +in configure.ac under "Hardening flags" section. + +[1] http://nvlpubs.nist.gov/nistpubs/TechnicalNotes/NIST.TN.1860.pdf +--- + acinclude.m4 | 25 +++++++++ + attrd/Makefile.am | 3 + + cib/Makefile.am | 3 + + configure.ac | 135 +++++++++++++++++++++++++++++++++++++++++++-- + crmd/Makefile.am | 3 + + fencing/Makefile.am | 3 + + lib/cib/Makefile.am | 3 + + lib/cluster/Makefile.am | 4 ++ + lib/common/Makefile.am | 4 ++ + lib/fencing/Makefile.am | 4 ++ + lib/lrmd/Makefile.am | 4 ++ + lib/pengine/Makefile.am | 8 +++ + lib/services/Makefile.am | 3 + + lib/transition/Makefile.am | 3 + + lrmd/Makefile.am | 6 ++ + mcp/Makefile.am | 3 + + pacemaker.spec.in | 17 ++++++ + pengine/Makefile.am | 6 ++ + 18 files changed, 231 insertions(+), 6 deletions(-) + create mode 100644 acinclude.m4 + +diff --git a/acinclude.m4 b/acinclude.m4 +new file mode 100644 +index 0000000..ecaa1dd +--- /dev/null ++++ b/acinclude.m4 +@@ -0,0 +1,25 @@ ++dnl ++dnl local autoconf/automake macros for pacemaker ++dnl ++ ++dnl Check if the flag is supported by linker (cacheable) ++dnl CC_CHECK_LDFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) ++dnl ++dnl Origin (declared license: GPLv2+ with less restrictive exception): ++dnl https://git.gnome.org/browse/glib/tree/m4macros/attributes.m4?h=2.49.1 ++dnl (AC_LANG_PROGRAM substituted by Jan Pokorny ) ++ ++AC_DEFUN([CC_CHECK_LDFLAGS], [ ++ AC_CACHE_CHECK([if $CC supports $1 flag], ++ AS_TR_SH([cc_cv_ldflags_$1]), ++ [ac_save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS $1" ++ AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[]])], ++ [eval "AS_TR_SH([cc_cv_ldflags_$1])='yes'"], ++ [eval "AS_TR_SH([cc_cv_ldflags_$1])="]) ++ LDFLAGS="$ac_save_LDFLAGS" ++ ]) ++ ++ AS_IF([eval test x$]AS_TR_SH([cc_cv_ldflags_$1])[ = xyes], ++ [$2], [$3]) ++]) +diff --git a/attrd/Makefile.am b/attrd/Makefile.am +index a116e0e..6eaaae2 100644 +--- a/attrd/Makefile.am ++++ b/attrd/Makefile.am +@@ -21,6 +21,9 @@ halibdir = $(CRM_DAEMON_DIR) + halib_PROGRAMS = attrd + ## SOURCES + ++attrd_CFLAGS = $(CFLAGS_HARDENED_EXE) ++attrd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) ++ + attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ + $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/lib/cib/libcib.la \ +diff --git a/cib/Makefile.am b/cib/Makefile.am +index fcb8ce9..4273191 100644 +--- a/cib/Makefile.am ++++ b/cib/Makefile.am +@@ -32,6 +32,9 @@ halib_PROGRAMS = cib cibmon + ## SOURCES + noinst_HEADERS = callbacks.h cibio.h cibmessages.h common.h notify.h + 
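++# Per-target hardening flags: these stay empty unless the "Hardening
++# flags" section of configure.ac detected toolchain support (or the
++# caller preset them); cib is a daemon, hence the _EXE variants.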
++cib_CFLAGS = $(CFLAGS_HARDENED_EXE) ++cib_LDFLAGS = $(LDFLAGS_HARDENED_EXE) ++ + cib_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ + $(COMMONLIBS) $(CRYPTOLIB) $(CLUSTERLIBS) + +diff --git a/configure.ac b/configure.ac +index c5b30dc..edf6a91 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -196,6 +196,13 @@ AC_ARG_ENABLE([systemd], + [ --enable-systemd + Do not build support for the Systemd init system [default=yes]]) + ++AC_ARG_ENABLE(hardening, ++ [ --with-hardening ++ Harden the resulting executables/libraries (best effort by default)], ++ [ HARDENING="${enableval}" ], ++ [ HARDENING=try ], ++) ++ + AC_ARG_WITH(ais, + [ --with-ais + Support the Corosync messaging and membership layer ], +@@ -1710,6 +1717,12 @@ if export | fgrep " CFLAGS=" > /dev/null; then + unset SAVED_CFLAGS + fi + ++AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries]) ++AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries]) ++ ++AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables]) ++AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables]) ++ + CC_EXTRAS="" + + if test "$GCC" != yes; then +@@ -1785,12 +1798,6 @@ dnl otherwise none of both + # Additional warnings it might be nice to enable one day + # -Wshadow + # -Wunreachable-code +- case "$host_os" in +- *solaris*) ;; +- *) EXTRA_FLAGS="$EXTRA_FLAGS +- -fstack-protector-all" +- ;; +- esac + for j in $EXTRA_FLAGS + do + if +@@ -1829,6 +1836,118 @@ dnl System specific options + AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) + fi + ++dnl ++dnl Hardening flags ++dnl ++dnl The prime control of whether to apply (targeted) hardening build flags and ++dnl which ones is --{enable,disable}-hardening option passed to ./configure: ++dnl ++dnl --enable-hardening=try (default): ++dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE, ++dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables ++dnl (see below) is set and non-null, all these custom flags (even if not ++dnl set) are used as are, otherwise the best effort is made to offer ++dnl reasonably strong hardening in several categories (RELRO, PIE, ++dnl "bind now", stack protector) according to what the selected toolchain ++dnl can offer ++dnl ++dnl --enable-hardening: ++dnl same effect as --enable-hardening=try when the environment variables ++dnl in question are suppressed ++dnl ++dnl --disable-hardening: ++dnl do not apply any targeted hardening measures at all ++dnl ++dnl The user-injected environment variables that regulate the hardening in ++dnl default case are as follows: ++dnl ++dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE ++dnl compiler and linker flags (respectively) for daemon programs ++dnl (attrd, cib, crmd, lrmd, stonithd, pacemakerd, pacemaker_remoted, ++dnl pengine) ++dnl ++dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB ++dnl compiler and linker flags (respectively) for libraries linked ++dnl with the daemon programs ++dnl ++dnl Note that these are purposedly targeted variables (addressing particular ++dnl targets all over the scattered Makefiles) and have no effect outside of ++dnl the predestined scope (e.g., CLI utilities). For a global reach, ++dnl use CFLAGS, LDFLAGS, etc. as usual. 
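++dnl
++dnl A minimal usage sketch (the flag values below are illustrative,
++dnl assuming a GNU toolchain): under the default --enable-hardening=try,
++dnl presetting
++dnl
++dnl   export CFLAGS_HARDENED_EXE="-fPIE"
++dnl   export LDFLAGS_HARDENED_EXE="-pie -Wl,-z,relro -Wl,-z,now"
++dnl   export CFLAGS_HARDENED_LIB="-fPIC"
++dnl   export LDFLAGS_HARDENED_LIB="-Wl,-z,relro -Wl,-z,now"
++dnl   ./configure
++dnl
++dnl makes all four variables be used exactly as given (even the ones
++dnl left unset), whereas exporting none of them lets the checks below
++dnl pick suitable flags automatically.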
++dnl ++dnl For guidance on the suitable flags consult, for instance: ++dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description ++dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils ++dnl ++ ++if test "x${HARDENING}" != "xtry"; then ++ unset CFLAGS_HARDENED_EXE ++ unset CFLAGS_HARDENED_LIB ++ unset LDFLAGS_HARDENED_EXE ++ unset LDFLAGS_HARDENED_LIB ++fi ++if test "x${HARDENING}" = "xno"; then ++ AC_MSG_NOTICE([Hardening: explicitly disabled]) ++elif test "x${HARDENING}" = "xyes" \ ++ || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then ++ dnl We'll figure out on our own... ++ CFLAGS_HARDENED_EXE= ++ CFLAGS_HARDENED_LIB= ++ LDFLAGS_HARDENED_EXE= ++ LDFLAGS_HARDENED_LIB= ++ relro=0 ++ pie=0 ++ bindnow=0 ++ # daemons incl. libs: partial RELRO ++ flag="-Wl,-z,relro" ++ CC_CHECK_LDFLAGS(["${flag}"], ++ [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; ++ LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; ++ relro=1] ++ ) ++ # daemons: PIE for both CFLAGS and LDFLAGS ++ if cc_supports_flag -fPIE; then ++ flag="-pie" ++ CC_CHECK_LDFLAGS(["${flag}"], ++ [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE"; ++ LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; ++ pie=1] ++ ) ++ fi ++ # daemons incl. libs: full RELRO if sensible ++ if test "${relro}" = 1 && test "${pie}" = 1; then ++ flag="-Wl,-z,now" ++ CC_CHECK_LDFLAGS(["${flag}"], ++ [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; ++ LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"; ++ bindnow=1] ++ ) ++ fi ++ # universal: prefer strong > all > default stack protector if possible ++ flag= ++ if cc_supports_flag -fstack-protector-strong; then ++ flag="-fstack-protector-strong" ++ elif cc_supports_flag -fstack-protector-all; then ++ flag="-fstack-protector-all" ++ elif cc_supports_flag -fstack-protector; then ++ flag="-fstack-protector" ++ fi ++ if test -n "${flag}"; then ++ CC_EXTRAS="${CC_EXTRAS} ${flag}" ++ stackprot=1 ++ fi ++ if test "${relro}" = 1 \ ++ || test "${pie}" = 1 \ ++ || test "${stackprot}" = 1; then ++ AC_MSG_NOTICE( ++ [Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}]) ++ else ++ AC_MSG_WARN([Hardening: no suitable features in the toolchain detected]) ++ fi ++else ++ AC_MSG_NOTICE([Hardening: using custom flags]) ++fi ++ + CFLAGS="$CFLAGS $CC_EXTRAS" + + NON_FATAL_CFLAGS="$CFLAGS" +@@ -1978,5 +2097,9 @@ AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) + AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) + AC_MSG_RESULT([]) + AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) ++AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}]) ++AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}]) ++AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}]) ++AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}]) + AC_MSG_RESULT([ Libraries = ${LIBS}]) + AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) +diff --git a/crmd/Makefile.am b/crmd/Makefile.am +index 979e266..6d5ee9a 100644 +--- a/crmd/Makefile.am ++++ b/crmd/Makefile.am +@@ -28,6 +28,9 @@ noinst_HEADERS = crmd.h crmd_fsa.h crmd_messages.h fsa_defines.h \ + fsa_matrix.h fsa_proto.h crmd_utils.h crmd_callbacks.h \ + crmd_lrm.h te_callbacks.h tengine.h + ++crmd_CFLAGS = $(CFLAGS_HARDENED_EXE) ++crmd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) ++ + crmd_LDADD = $(top_builddir)/lib/fencing/libstonithd.la \ + $(top_builddir)/lib/transition/libtransitioner.la \ + $(top_builddir)/lib/pengine/libpe_rules.la \ +diff --git 
a/fencing/Makefile.am b/fencing/Makefile.am +index 1d591fc..c53ead6 100644 +--- a/fencing/Makefile.am ++++ b/fencing/Makefile.am +@@ -52,6 +52,9 @@ stonith_admin_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ + stonithd_CPPFLAGS = -I$(top_srcdir)/pengine $(AM_CPPFLAGS) + stonithd_YFLAGS = -d + ++stonithd_CFLAGS = $(CFLAGS_HARDENED_EXE) ++stonithd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) ++ + stonithd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/lib/cluster/libcrmcluster.la \ + $(top_builddir)/lib/fencing/libstonithd.la \ +diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am +index e414a7f..637ea8c 100644 +--- a/lib/cib/Makefile.am ++++ b/lib/cib/Makefile.am +@@ -27,6 +27,9 @@ libcib_la_SOURCES += cib_file.c cib_remote.c + libcib_la_LDFLAGS = -version-info 5:1:1 + libcib_la_CPPFLAGS = -I$(top_srcdir) $(AM_CPPFLAGS) + ++libcib_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libcib_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libcib_la_LIBADD = $(CRYPTOLIB) $(top_builddir)/lib/pengine/libpe_rules.la $(top_builddir)/lib/common/libcrmcommon.la + + clean-generic: +diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am +index 06d7066..9a57bbb 100644 +--- a/lib/cluster/Makefile.am ++++ b/lib/cluster/Makefile.am +@@ -21,6 +21,10 @@ include $(top_srcdir)/Makefile.common + lib_LTLIBRARIES = libcrmcluster.la + + libcrmcluster_la_LDFLAGS = -version-info 6:0:2 ++ ++libcrmcluster_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libcrmcluster_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libcrmcluster_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la $(CLUSTERLIBS) + + libcrmcluster_la_SOURCES = election.c cluster.c membership.c +diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am +index 7550ec1..0e1ad29 100644 +--- a/lib/common/Makefile.am ++++ b/lib/common/Makefile.am +@@ -32,6 +32,10 @@ lib_LTLIBRARIES = libcrmcommon.la + CFLAGS = $(CFLAGS_COPY:-Wcast-qual=) -fPIC + + libcrmcommon_la_LDFLAGS = -version-info 9:0:6 ++ ++libcrmcommon_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libcrmcommon_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libcrmcommon_la_LIBADD = @LIBADD_DL@ $(GNUTLSLIBS) -lm + + libcrmcommon_la_SOURCES = compat.c digest.c ipc.c io.c procfs.c utils.c xml.c \ +diff --git a/lib/fencing/Makefile.am b/lib/fencing/Makefile.am +index 85ae40a..dc15799 100644 +--- a/lib/fencing/Makefile.am ++++ b/lib/fencing/Makefile.am +@@ -21,5 +21,9 @@ include $(top_srcdir)/Makefile.common + lib_LTLIBRARIES = libstonithd.la + + libstonithd_la_LDFLAGS = -version-info 4:1:2 ++ ++libstonithd_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libstonithd_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libstonithd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la + libstonithd_la_SOURCES = st_client.c +diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am +index 25f3d55..611675e 100644 +--- a/lib/lrmd/Makefile.am ++++ b/lib/lrmd/Makefile.am +@@ -19,6 +19,10 @@ include $(top_srcdir)/Makefile.common + lib_LTLIBRARIES = liblrmd.la + + liblrmd_la_LDFLAGS = -version-info 4:0:3 ++ ++liblrmd_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++liblrmd_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/lib/services/libcrmservice.la \ + $(top_builddir)/lib/fencing/libstonithd.la +diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am +index de760c3..ad5c5c3 100644 +--- a/lib/pengine/Makefile.am ++++ b/lib/pengine/Makefile.am +@@ -24,10 +24,18 @@ lib_LTLIBRARIES = libpe_rules.la libpe_status.la + noinst_HEADERS = 
unpack.h variant.h + + libpe_rules_la_LDFLAGS = -version-info 2:6:0 ++ ++libpe_rules_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libpe_rules_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la + libpe_rules_la_SOURCES = rules.c common.c + + libpe_status_la_LDFLAGS = -version-info 11:0:1 ++ ++libpe_status_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libpe_status_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libpe_status_la_LIBADD = @CURSESLIBS@ $(top_builddir)/lib/common/libcrmcommon.la + libpe_status_la_SOURCES = status.c unpack.c utils.c complex.c native.c \ + group.c clone.c rules.c common.c remote.c +diff --git a/lib/services/Makefile.am b/lib/services/Makefile.am +index c789fbd..b3208c2 100644 +--- a/lib/services/Makefile.am ++++ b/lib/services/Makefile.am +@@ -27,6 +27,9 @@ libcrmservice_la_LDFLAGS = -version-info 4:1:1 + libcrmservice_la_CPPFLAGS = -DOCF_ROOT_DIR=\"@OCF_ROOT_DIR@\" $(AM_CPPFLAGS) + libcrmservice_la_CFLAGS = $(GIO_CFLAGS) + ++libcrmservice_la_CFLAGS += $(CFLAGS_HARDENED_LIB) ++libcrmservice_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libcrmservice_la_LIBADD = $(GIO_LIBS) $(top_builddir)/lib/common/libcrmcommon.la $(DBUS_LIBS) + + libcrmservice_la_SOURCES = services.c services_linux.c +diff --git a/lib/transition/Makefile.am b/lib/transition/Makefile.am +index 9bc039e..4d6cd23 100644 +--- a/lib/transition/Makefile.am ++++ b/lib/transition/Makefile.am +@@ -25,6 +25,9 @@ lib_LTLIBRARIES = libtransitioner.la + libtransitioner_la_LDFLAGS = -version-info 2:5:0 + libtransitioner_la_CPPFLAGS = -I$(top_builddir) $(AM_CPPFLAGS) + ++libtransitioner_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libtransitioner_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libtransitioner_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la + libtransitioner_la_SOURCES = unpack.c graph.c utils.c + +diff --git a/lrmd/Makefile.am b/lrmd/Makefile.am +index 64df105..5846503 100644 +--- a/lrmd/Makefile.am ++++ b/lrmd/Makefile.am +@@ -30,6 +30,9 @@ if BUILD_SYSTEMD + systemdunit_DATA = pacemaker_remote.service + endif + ++lrmd_CFLAGS = $(CFLAGS_HARDENED_EXE) ++lrmd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) ++ + lrmd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ + $(top_builddir)/lib/services/libcrmservice.la \ + $(top_builddir)/lib/lrmd/liblrmd.la \ +@@ -38,6 +41,9 @@ lrmd_SOURCES = main.c lrmd.c + + pacemaker_remoted_CPPFLAGS = -DSUPPORT_REMOTE $(AM_CPPFLAGS) + ++pacemaker_remoted_CFLAGS = $(CFLAGS_HARDENED_EXE) ++pacemaker_remoted_LDFLAGS = $(LDFLAGS_HARDENED_EXE) ++ + pacemaker_remoted_LDADD = $(lrmd_LDADD) + pacemaker_remoted_SOURCES = main.c lrmd.c tls_backend.c ipc_proxy.c + +diff --git a/mcp/Makefile.am b/mcp/Makefile.am +index 195530a..074d251 100644 +--- a/mcp/Makefile.am ++++ b/mcp/Makefile.am +@@ -31,6 +31,9 @@ endif + + noinst_HEADERS = pacemaker.h + ++pacemakerd_CFLAGS = $(CFLAGS_HARDENED_EXE) ++pacemakerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE) ++ + pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la $(top_builddir)/lib/common/libcrmcommon.la + pacemakerd_LDADD += $(CLUSTERLIBS) + pacemakerd_SOURCES = pacemaker.c corosync.c +diff --git a/pacemaker.spec.in b/pacemaker.spec.in +index 6024514..a607588 100644 +--- a/pacemaker.spec.in ++++ b/pacemaker.spec.in +@@ -63,6 +63,9 @@ + # Turn off cman support on platforms that normally ship with it + %bcond_without cman + ++# Turn off hardening of libraries and daemon executables ++%bcond_without hardening ++ + %if %{with profiling} + # This disables -debuginfo package creation and also the stripping 
binaries/libraries + # Useful if you want sane profiling data +@@ -168,6 +171,7 @@ resource health. + + Available rpmbuild rebuild options: + --with(out) : cman stonithd doc coverage profiling pre_release upstart_job ++ hardening + + %package cli + License: GPLv2+ and LGPLv2+ +@@ -301,6 +305,18 @@ find . -exec touch \{\} \; + # Early versions of autotools (e.g. RHEL <= 5) do not support --docdir + export docdir=%{pcmk_docdir} + ++%if %{with hardening} ++# prefer distro-provided hardening flags in case they are defined ++# through _hardening_{c,ld}flags macros, configure script will ++# use its own defaults otherwise; if such hardenings are completely ++# undesired, rpmbuild using "--without hardening" ++# (or "--define '_without_hardening 1'") ++export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" ++export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" ++export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" ++export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" ++%endif ++ + ./autogen.sh + + %{configure} \ +@@ -309,6 +325,7 @@ export docdir=%{pcmk_docdir} + %{!?with_cman: --without-cman} \ + --without-heartbeat \ + %{!?with_doc: --with-brand=} \ ++ %{!?with_hardening: --disable-hardening} \ + --with-initdir=%{_initrddir} \ + --localstatedir=%{_var} \ + --with-version=%{version}-%{release} +diff --git a/pengine/Makefile.am b/pengine/Makefile.am +index 96c914f..d4dbfb9 100644 +--- a/pengine/Makefile.am ++++ b/pengine/Makefile.am +@@ -61,12 +61,18 @@ endif + noinst_HEADERS = allocate.h utils.h pengine.h + + libpengine_la_LDFLAGS = -version-info 11:0:1 ++ ++libpengine_la_CFLAGS = $(CFLAGS_HARDENED_LIB) ++libpengine_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB) ++ + libpengine_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la \ + $(top_builddir)/lib/cib/libcib.la + # -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version + libpengine_la_SOURCES = pengine.c allocate.c utils.c constraints.c + libpengine_la_SOURCES += native.c group.c clone.c master.c graph.c utilization.c + ++pengine_CFLAGS = $(CFLAGS_HARDENED_EXE) ++pengine_LDFLAGS = $(LDFLAGS_HARDENED_EXE) + pengine_LDADD = $(top_builddir)/lib/cib/libcib.la $(COMMONLIBS) + # libcib for get_object_root() + # $(top_builddir)/lib/hbclient/libhbclient.la +-- +1.8.3.1 + + +From 35ec27112452f2bd06ae8b395d8543db935e2b05 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Wed, 22 Jun 2016 15:18:00 +0200 +Subject: [PATCH 2/2] Build: configure.ac: prefer as-needed linking in case of + "-z now" + +Slight optimization of a default toolchain-flags-based hardening. +--- + configure.ac | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/configure.ac b/configure.ac +index edf6a91..4beb877 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1914,7 +1914,10 @@ elif test "x${HARDENING}" = "xyes" \ + pie=1] + ) + fi +- # daemons incl. libs: full RELRO if sensible ++ # daemons incl. 
libs: full RELRO if sensible + as-needed linking ++ # so as to possibly mitigate startup performance ++ # hit caused by excessive linking with unneeded ++ # libraries + if test "${relro}" = 1 && test "${pie}" = 1; then + flag="-Wl,-z,now" + CC_CHECK_LDFLAGS(["${flag}"], +@@ -1923,6 +1926,13 @@ elif test "x${HARDENING}" = "xyes" \ + bindnow=1] + ) + fi ++ if test "${bindnow}" = 1; then ++ flag="-Wl,--as-needed" ++ CC_CHECK_LDFLAGS(["${flag}"], ++ [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}"; ++ LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"] ++ ) ++ fi + # universal: prefer strong > all > default stack protector if possible + flag= + if cc_supports_flag -fstack-protector-strong; then +-- +1.8.3.1 + diff --git a/SOURCES/0030-Log-lrmd-Improved-logging-when-no-pacemaker-remote-a.patch b/SOURCES/0030-Log-lrmd-Improved-logging-when-no-pacemaker-remote-a.patch deleted file mode 100644 index 6bff962..0000000 --- a/SOURCES/0030-Log-lrmd-Improved-logging-when-no-pacemaker-remote-a.patch +++ /dev/null @@ -1,34 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 17 Sep 2015 14:43:15 +1000 -Subject: [PATCH] Log: lrmd: Improved logging when no pacemaker remote authkey - is available - -(cherry picked from commit 20c2178f076ff32fdf9ba9a467c193b8dac2f9e5) ---- - lib/lrmd/lrmd_client.c | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - -diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c -index 42bdf2b..1f1ffde 100644 ---- a/lib/lrmd/lrmd_client.c -+++ b/lib/lrmd/lrmd_client.c -@@ -1061,13 +1061,17 @@ lrmd_tls_set_key(gnutls_datum_t * key) - if (set_key(key, specific_location) == 0) { - crm_debug("Using custom authkey location %s", specific_location); - return 0; -+ -+ } else { -+ crm_err("No lrmd remote key found at %s, trying default locations", specific_location); - } - -- if (set_key(key, DEFAULT_REMOTE_KEY_LOCATION)) { -+ if (set_key(key, DEFAULT_REMOTE_KEY_LOCATION) != 0) { - rc = set_key(key, ALT_REMOTE_KEY_LOCATION); - } -+ - if (rc) { -- crm_err("No lrmd remote key found"); -+ crm_err("No lrmd remote key found at %s", DEFAULT_REMOTE_KEY_LOCATION); - return -1; - } - diff --git a/SOURCES/0031-Fix-liblrmd-don-t-print-error-if-remote-key-environm.patch b/SOURCES/0031-Fix-liblrmd-don-t-print-error-if-remote-key-environm.patch deleted file mode 100644 index 0210482..0000000 --- a/SOURCES/0031-Fix-liblrmd-don-t-print-error-if-remote-key-environm.patch +++ /dev/null @@ -1,38 +0,0 @@ -From: Ken Gaillot -Date: Wed, 23 Sep 2015 10:45:39 -0500 -Subject: [PATCH] Fix: liblrmd: don't print error if remote key environment - variable unset - -20c2178 added error logging if the remote key was unable to be read, -however it would also log an error in the usual case where the -environment variable was simply unset. 
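
Taken together, 20c2178 (patch 0030) and this follow-up leave the liblrmd key lookup behaving as below -- a consolidated sketch, not the verbatim function: set_key(), the *_REMOTE_KEY_LOCATION constants and the crm_* loggers are the existing liblrmd internals, and the environment lookup that normally produces specific_location is hoisted into a parameter here.

static int
lrmd_tls_set_key_sketch(gnutls_datum_t *key, const char *specific_location)
{
    int rc = 0;

    /* A configured custom location wins; failure to read it is only
     * reported as an error when it was actually set. */
    if (set_key(key, specific_location) == 0) {
        crm_debug("Using custom authkey location %s", specific_location);
        return 0;

    } else if (specific_location) {
        crm_err("No valid lrmd remote key found at %s, trying default location",
                specific_location);
    }

    /* Fall back to the default location, then the alternate one. */
    if (set_key(key, DEFAULT_REMOTE_KEY_LOCATION) != 0) {
        rc = set_key(key, ALT_REMOTE_KEY_LOCATION);
    }

    if (rc != 0) {
        /* Name the path that was tried instead of failing vaguely */
        crm_err("No valid lrmd remote key found at %s", DEFAULT_REMOTE_KEY_LOCATION);
        return -1;
    }
    return rc;
}
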
- -(cherry picked from commit dec3349f1252e2c2c18ed110b8cc4a2b2212b613) ---- - lib/lrmd/lrmd_client.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c -index 1f1ffde..f365e59 100644 ---- a/lib/lrmd/lrmd_client.c -+++ b/lib/lrmd/lrmd_client.c -@@ -1062,8 +1062,8 @@ lrmd_tls_set_key(gnutls_datum_t * key) - crm_debug("Using custom authkey location %s", specific_location); - return 0; - -- } else { -- crm_err("No lrmd remote key found at %s, trying default locations", specific_location); -+ } else if (specific_location) { -+ crm_err("No valid lrmd remote key found at %s, trying default location", specific_location); - } - - if (set_key(key, DEFAULT_REMOTE_KEY_LOCATION) != 0) { -@@ -1071,7 +1071,7 @@ lrmd_tls_set_key(gnutls_datum_t * key) - } - - if (rc) { -- crm_err("No lrmd remote key found at %s", DEFAULT_REMOTE_KEY_LOCATION); -+ crm_err("No valid lrmd remote key found at %s", DEFAULT_REMOTE_KEY_LOCATION); - return -1; - } - diff --git a/SOURCES/0032-Fix-Tools-Repair-the-logging-of-interesting-command-.patch b/SOURCES/0032-Fix-Tools-Repair-the-logging-of-interesting-command-.patch deleted file mode 100644 index fda67b2..0000000 --- a/SOURCES/0032-Fix-Tools-Repair-the-logging-of-interesting-command-.patch +++ /dev/null @@ -1,182 +0,0 @@ -From: Andrew Beekhof -Date: Mon, 28 Sep 2015 14:54:28 +1000 -Subject: [PATCH] Fix: Tools: Repair the logging of 'interesting' command-lines - -(cherry picked from commit b7d6608d8b33b4e9580e04f25446176bac832fb7) ---- - tools/attrd_updater.c | 1 + - tools/cibadmin.c | 8 ++++++-- - tools/crm_attribute.c | 6 +++++- - tools/crm_resource.c | 30 +++++++++++++++++++++++------- - 4 files changed, 35 insertions(+), 10 deletions(-) - -diff --git a/tools/attrd_updater.c b/tools/attrd_updater.c -index 878dab5..11462ee 100644 ---- a/tools/attrd_updater.c -+++ b/tools/attrd_updater.c -@@ -150,6 +150,7 @@ main(int argc, char **argv) - case 'v': - command = flag; - attr_value = optarg; -+ crm_log_args(argc, argv); /* Too much? */ - break; - default: - ++argerr; -diff --git a/tools/cibadmin.c b/tools/cibadmin.c -index 6b90536..c16d3c7 100644 ---- a/tools/cibadmin.c -+++ b/tools/cibadmin.c -@@ -213,7 +213,7 @@ main(int argc, char **argv) - int option_index = 0; - - crm_xml_init(); /* Sets buffer allocation strategy */ -- crm_log_preinit(NULL, argc, argv); -+ crm_log_cli_init("cibadmin"); - crm_set_options(NULL, "command [options] [data]", long_options, - "Provides direct access to the cluster configuration." - "\n\nAllows the configuration, or sections of it, to be queried, modified, replaced and deleted." 
-@@ -286,6 +286,7 @@ main(int argc, char **argv) - break; - case 'B': - cib_action = CIB_OP_BUMP; -+ crm_log_args(argc, argv); - break; - case 'V': - command_options = command_options | cib_verbose; -@@ -303,13 +304,16 @@ main(int argc, char **argv) - case 'X': - crm_trace("Option %c => %s", flag, optarg); - admin_input_xml = optarg; -+ crm_log_args(argc, argv); - break; - case 'x': - crm_trace("Option %c => %s", flag, optarg); - admin_input_file = optarg; -+ crm_log_args(argc, argv); - break; - case 'p': - admin_input_stdin = TRUE; -+ crm_log_args(argc, argv); - break; - case 'N': - case 'h': -@@ -334,6 +338,7 @@ main(int argc, char **argv) - case 'f': - force_flag = TRUE; - command_options |= cib_quorum_override; -+ crm_log_args(argc, argv); - break; - case 'a': - output = createEmptyCib(1); -@@ -355,7 +360,6 @@ main(int argc, char **argv) - quiet = FALSE; - } - -- crm_log_init(NULL, LOG_CRIT, FALSE, FALSE, argc, argv, quiet); - while (bump_log_num > 0) { - crm_bump_log_level(argc, argv); - bump_log_num--; -diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c -index c37b096..fc2f7c7 100644 ---- a/tools/crm_attribute.c -+++ b/tools/crm_attribute.c -@@ -146,11 +146,15 @@ main(int argc, char **argv) - case '?': - crm_help(flag, EX_OK); - break; -- case 'D': - case 'G': -+ command = flag; -+ attr_value = optarg; -+ break; -+ case 'D': - case 'v': - command = flag; - attr_value = optarg; -+ crm_log_args(argc, argv); - break; - case 'q': - case 'Q': -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 2a94362..1b2976b 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -304,6 +304,7 @@ main(int argc, char **argv) - || safe_str_eq("force-check", longname)) { - rsc_cmd = flag; - rsc_long_cmd = longname; -+ crm_log_args(argc, argv); - - } else if (safe_str_eq("list-ocf-providers", longname) - || safe_str_eq("list-ocf-alternatives", longname) -@@ -433,6 +434,7 @@ main(int argc, char **argv) - break; - case 'f': - do_force = TRUE; -+ crm_log_args(argc, argv); - break; - case 'i': - prop_id = optarg; -@@ -452,41 +454,55 @@ main(int argc, char **argv) - case 'T': - timeout_ms = crm_get_msec(optarg); - break; -+ - case 'C': - case 'R': - case 'P': -- rsc_cmd = 'C'; -+ crm_log_args(argc, argv); - require_resource = FALSE; - require_crmd = TRUE; -+ rsc_cmd = 'C'; - break; -+ - case 'F': -- rsc_cmd = flag; -+ crm_log_args(argc, argv); - require_crmd = TRUE; -+ rsc_cmd = flag; -+ break; -+ -+ case 'U': -+ case 'B': -+ case 'M': -+ case 'D': -+ crm_log_args(argc, argv); -+ rsc_cmd = flag; - break; -+ - case 'L': - case 'c': - case 'l': - case 'q': - case 'w': -- case 'D': - case 'W': -- case 'M': -- case 'U': -- case 'B': - case 'O': - case 'o': - case 'A': - case 'a': - rsc_cmd = flag; - break; -+ - case 'j': - print_pending = TRUE; - break; - case 'p': -- case 'g': - case 'd': - case 'S': -+ crm_log_args(argc, argv); -+ prop_name = optarg; -+ rsc_cmd = flag; -+ break; - case 'G': -+ case 'g': - prop_name = optarg; - rsc_cmd = flag; - break; diff --git a/SOURCES/0033-Feature-Tools-Do-not-send-command-lines-to-syslog.patch b/SOURCES/0033-Feature-Tools-Do-not-send-command-lines-to-syslog.patch deleted file mode 100644 index c01d782..0000000 --- a/SOURCES/0033-Feature-Tools-Do-not-send-command-lines-to-syslog.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: Andrew Beekhof -Date: Mon, 28 Sep 2015 15:02:10 +1000 -Subject: [PATCH] Feature: Tools: Do not send command lines to syslog - -(cherry picked from commit 8dae6838312c6a60c2e4b7ffa73a100fd5d0dce3) ---- - lib/common/logging.c | 8 
-------- - 1 file changed, 8 deletions(-) - -diff --git a/lib/common/logging.c b/lib/common/logging.c -index b18b841..6879023 100644 ---- a/lib/common/logging.c -+++ b/lib/common/logging.c -@@ -928,24 +928,17 @@ crm_log_args(int argc, char **argv) - { - int lpc = 0; - int len = 0; -- int restore = FALSE; - int existing_len = 0; - int line = __LINE__; - static int logged = 0; - - char *arg_string = NULL; -- struct qb_log_callsite *args_cs = -- qb_log_callsite_get(__func__, __FILE__, ARGS_FMT, LOG_NOTICE, line, 0); - - if (argc == 0 || argv == NULL || logged) { - return; - } - - logged = 1; -- qb_bit_set(args_cs->targets, QB_LOG_SYSLOG); /* Turn on syslog too */ -- -- restore = qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_STATE_GET, 0); -- qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_TRUE); - - for (; lpc < argc; lpc++) { - if (argv[lpc] == NULL) { -@@ -958,7 +951,6 @@ crm_log_args(int argc, char **argv) - } - - qb_log_from_external_source(__func__, __FILE__, ARGS_FMT, LOG_NOTICE, line, 0, arg_string); -- qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, restore); - - free(arg_string); - } diff --git a/SOURCES/0034-Log-cibadmin-Default-once-again-to-LOG_CRIT.patch b/SOURCES/0034-Log-cibadmin-Default-once-again-to-LOG_CRIT.patch deleted file mode 100644 index ccc3f1e..0000000 --- a/SOURCES/0034-Log-cibadmin-Default-once-again-to-LOG_CRIT.patch +++ /dev/null @@ -1,21 +0,0 @@ -From: Andrew Beekhof -Date: Mon, 28 Sep 2015 18:45:32 +1000 -Subject: [PATCH] Log: cibadmin: Default once again to LOG_CRIT - -(cherry picked from commit d0d6118cbee3eccb3467058eadd91e08d3f4a42f) ---- - tools/cibadmin.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/tools/cibadmin.c b/tools/cibadmin.c -index c16d3c7..84531f8 100644 ---- a/tools/cibadmin.c -+++ b/tools/cibadmin.c -@@ -214,6 +214,7 @@ main(int argc, char **argv) - - crm_xml_init(); /* Sets buffer allocation strategy */ - crm_log_cli_init("cibadmin"); -+ set_crm_log_level(LOG_CRIT); - crm_set_options(NULL, "command [options] [data]", long_options, - "Provides direct access to the cluster configuration." - "\n\nAllows the configuration, or sections of it, to be queried, modified, replaced and deleted." 
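
The net effect of patches 0032-0034: each tool records its 'interesting' command lines once per process at NOTICE level, cibadmin initializes logging via crm_log_cli_init() with a LOG_CRIT default, and crm_log_args() no longer forces the record into syslog. A condensed sketch of crm_log_args() as it stands after 0033 -- the argv-joining loop is paraphrased from the unchanged context, and ARGS_FMT and qb_log_from_external_source() are the existing logging.c/libqb names:

void
crm_log_args_sketch(int argc, char **argv)
{
    static int logged = 0;      /* log the command line only once */
    int lpc = 0;
    size_t existing_len = 0;
    char *arg_string = NULL;

    if (argc == 0 || argv == NULL || logged) {
        return;
    }
    logged = 1;

    /* Join argv into one space-separated string */
    for (; lpc < argc && argv[lpc] != NULL; lpc++) {
        size_t len = strlen(argv[lpc]) + 2;     /* +1 space, +1 NUL */

        arg_string = realloc(arg_string, existing_len + len);
        existing_len += sprintf(arg_string + existing_len, "%s ", argv[lpc]);
    }

    /* Emit one NOTICE record through libqb; with the qb_log_ctl() calls
     * gone, the syslog target is no longer forcibly enabled. */
    qb_log_from_external_source(__func__, __FILE__, ARGS_FMT, LOG_NOTICE,
                                __LINE__, 0, arg_string);
    free(arg_string);
}
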
diff --git a/SOURCES/0035-Fix-crm_resource-Correctly-update-existing-meta-attr.patch b/SOURCES/0035-Fix-crm_resource-Correctly-update-existing-meta-attr.patch deleted file mode 100644 index 33670ac..0000000 --- a/SOURCES/0035-Fix-crm_resource-Correctly-update-existing-meta-attr.patch +++ /dev/null @@ -1,87 +0,0 @@ -From: Andrew Beekhof -Date: Wed, 30 Sep 2015 17:33:00 +1000 -Subject: [PATCH] Fix: crm_resource: Correctly update existing meta attributes - regardless of their position in the hierarchy - -(cherry picked from commit f367348c832c64e2dc480dc96d2e0c2aa88639ba) - -Conflicts: - tools/crm_resource_runtime.c --- - tools/crm_resource_runtime.c | 44 ++++++++++++++++++++++++++++++++++++-------- - 1 file changed, 36 insertions(+), 8 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index ce9db01..a04adb9 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -213,10 +213,11 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - } - - if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { -- rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, XML_TAG_META_SETS, attr_set, attr_id, -- attr_name, &local_attr_id); -- if(rc == pcmk_ok && do_force == FALSE) { -- if (BE_QUIET == FALSE) { -+ if (do_force == FALSE) { -+ rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, -+ XML_TAG_META_SETS, attr_set, attr_id, -+ attr_name, &local_attr_id); -+ if (rc == pcmk_ok && BE_QUIET == FALSE) { - printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", - uber_parent(rsc)->id, attr_name, local_attr_id); - printf(" Delete '%s' first or use --force to override\n", local_attr_id); -@@ -224,7 +225,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - return -ENOTUNIQ; - } - -- } else if(rsc->parent) { -+ } else if(rsc->parent && do_force == FALSE) { - - switch(rsc->parent->variant) { - case pe_group: - if (BE_QUIET == FALSE) { - printf("Updating '%s' for '%s' will not apply to its peers in '%s'\n", attr_name, rsc_id, rsc->parent->id); - } - break; - case pe_master: - case pe_clone: -- rsc = rsc->parent; -- if (BE_QUIET == FALSE) { -- printf("Updating '%s' for '%s'...\n", rsc->id, rsc_id); -+ -+ rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); -+ free(local_attr_id); -+ -+ if(rc != pcmk_ok) { -+ rsc = rsc->parent; -+ if (BE_QUIET == FALSE) { -+ printf("Updating '%s' on '%s', the parent of '%s'\n", attr_name, rsc->id, rsc_id); -+ } - } - break; - default: - break; - } -+ -+ } else if (rsc->parent && BE_QUIET == FALSE) { -+ printf("Forcing update of '%s' for '%s' instead of '%s'\n", attr_name, rsc_id, rsc->parent->id); -+ -+ } else if(rsc->parent == NULL && rsc->children) { -+ resource_t *child = rsc->children->data; -+ -+ if(child->variant == pe_native) { -+ lookup_id = clone_strip(child->id); /* Could be a cloned group! */ -+ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id); -+ -+ if(rc == pcmk_ok) { -+ rsc = child; -+ if (BE_QUIET == FALSE) { -+ printf("A value for '%s' already exists in child '%s', updating that instead of '%s'\n", attr_name, lookup_id, rsc_id); -+ } -+ } -+ -+ free(local_attr_id); -+ free(lookup_id); -+ } - } - - lookup_id = clone_strip(rsc->id); /* Could be a cloned group!
*/ diff --git a/SOURCES/0036-Log-crm_resource-restart-Improved-user-feedback-on-f.patch b/SOURCES/0036-Log-crm_resource-restart-Improved-user-feedback-on-f.patch deleted file mode 100644 index 4dded82..0000000 --- a/SOURCES/0036-Log-crm_resource-restart-Improved-user-feedback-on-f.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: Andrew Beekhof -Date: Mon, 5 Oct 2015 12:27:59 +1100 -Subject: [PATCH] Log: crm_resource --restart: Improved user feedback on - failure - -(cherry picked from commit b557a39973a1fb85b2791be67dc03cfd32c22d89) --- - tools/crm_resource_runtime.c | 6 ++++++ - 1 file changed, 6 insertions(+) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index a04adb9..878fd0b 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1040,6 +1040,12 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - pe_working_set_t data_set; - - if(resource_is_running_on(rsc, host) == FALSE) { -+ const char *id = rsc->clone_name?rsc->clone_name:rsc->id; -+ if(host) { -+ printf("%s is not running on %s and so cannot be restarted\n", id, host); -+ } else { -+ printf("%s is not running anywhere and so cannot be restarted\n", id); -+ } - return -ENXIO; - } - diff --git a/SOURCES/0037-Fix-crm_resource-Correctly-delete-existing-meta-attr.patch b/SOURCES/0037-Fix-crm_resource-Correctly-delete-existing-meta-attr.patch deleted file mode 100644 index 5699706..0000000 --- a/SOURCES/0037-Fix-crm_resource-Correctly-delete-existing-meta-attr.patch +++ /dev/null @@ -1,179 +0,0 @@ -From: "Gao,Yan" -Date: Wed, 30 Sep 2015 16:59:43 +0200 -Subject: [PATCH] Fix: crm_resource: Correctly delete existing meta attributes - regardless of their position in the hierarchy - -Use the same logic as "--set-parameter" for "--delete-parameter".
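
The helper this patch extracts (full diff follows) reduces to a short decision ladder shared by --set-parameter and --delete-parameter. A simplified sketch assuming the crm_resource globals (do_force, attr_set_type) and pacemaker's internal types; the warning printfs and the cloned-children redirection are trimmed, and the --force short-circuit shown is the form patch 0038 below settles on:

static resource_t *
find_matching_attr_resource_sketch(resource_t *rsc, const char *rsc_id,
                                   cib_t *cib, const char *attr_name)
{
    char *local_attr_id = NULL;

    if (do_force || rsc->parent == NULL) {
        return rsc;         /* --force, or a plain primitive: act as named */
    }

    switch (rsc->parent->variant) {
        case pe_group:
            /* stay on the group member; its peers are left untouched */
            break;

        case pe_master:
        case pe_clone:
            /* nothing set on the instance itself? target the clone */
            if (find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type,
                                   NULL, NULL, attr_name,
                                   &local_attr_id) != pcmk_ok) {
                rsc = rsc->parent;
            }
            free(local_attr_id);
            break;

        default:
            break;
    }
    return rsc;
}
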
- -(cherry picked from commit cdee10c7310ab433b006126bc087f6b8dff3843e) - -Conflicts: - tools/crm_resource_runtime.c ---- - tools/crm_resource_runtime.c | 109 ++++++++++++++++++++++--------------------- - 1 file changed, 55 insertions(+), 54 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 878fd0b..2d51e88 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -190,47 +190,20 @@ find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const cha - return rc; - } - --int --cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const char *attr_id, -- const char *attr_name, const char *attr_value, bool recursive, -- cib_t * cib, pe_working_set_t * data_set) -+static resource_t * -+find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id, -+ const char * attr_name, cib_t * cib, const char * cmd) - { - int rc = pcmk_ok; -- static bool need_init = TRUE; -- - char *lookup_id = NULL; - char *local_attr_id = NULL; -- char *local_attr_set = NULL; -- -- xmlNode *xml_top = NULL; -- xmlNode *xml_obj = NULL; -- -- bool use_attributes_tag = FALSE; -- resource_t *rsc = find_rsc_or_clone(rsc_id, data_set); -- -- if (rsc == NULL) { -- return -ENXIO; -- } -- -- if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { -- if (do_force == FALSE) { -- rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, -- XML_TAG_META_SETS, attr_set, attr_id, -- attr_name, &local_attr_id); -- if (rc == pcmk_ok && BE_QUIET == FALSE) { -- printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", -- uber_parent(rsc)->id, attr_name, local_attr_id); -- printf(" Delete '%s' first or use --force to override\n", local_attr_id); -- } -- return -ENOTUNIQ; -- } - -- } else if(rsc->parent && do_force == FALSE) { -+ if(rsc->parent && do_force == FALSE) { - - switch(rsc->parent->variant) { - case pe_group: - if (BE_QUIET == FALSE) { -- printf("Updating '%s' for '%s' will not apply to its peers in '%s'\n", attr_name, rsc_id, rsc->parent->id); -+ printf("Performing %s of '%s' for '%s' will not apply to its peers in '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id); - } - break; - case pe_master: -@@ -242,7 +215,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - if(rc != pcmk_ok) { - rsc = rsc->parent; - if (BE_QUIET == FALSE) { -- printf("Updating '%s' on '%s', the parent of '%s'\n", attr_name, rsc->id, rsc_id); -+ printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id); - } - } - break; -@@ -251,7 +224,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - } - - } else if (rsc->parent && BE_QUIET == FALSE) { -- printf("Forcing update of '%s' for '%s' instead of '%s'\n", attr_name, rsc_id, rsc->parent->id); -+ printf("Forcing %s of '%s' for '%s' instead of '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id); - - } else if(rsc->parent == NULL && rsc->children) { - resource_t *child = rsc->children->data; -@@ -263,7 +236,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - if(rc == pcmk_ok) { - rsc = child; - if (BE_QUIET == FALSE) { -- printf("A value for '%s' already exists in child '%s', updating that instead of '%s'\n", attr_name, lookup_id, rsc_id); -+ printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id); - } - } - -@@ -272,6 +245,51 
@@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - } - } - -+ return rsc; -+} -+ -+int -+cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const char *attr_id, -+ const char *attr_name, const char *attr_value, bool recursive, -+ cib_t * cib, pe_working_set_t * data_set) -+{ -+ int rc = pcmk_ok; -+ static bool need_init = TRUE; -+ -+ char *lookup_id = NULL; -+ char *local_attr_id = NULL; -+ char *local_attr_set = NULL; -+ -+ xmlNode *xml_top = NULL; -+ xmlNode *xml_obj = NULL; -+ -+ bool use_attributes_tag = FALSE; -+ resource_t *rsc = find_rsc_or_clone(rsc_id, data_set); -+ -+ if (rsc == NULL) { -+ return -ENXIO; -+ } -+ -+ if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { -+ if (do_force == FALSE) { -+ rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, -+ XML_TAG_META_SETS, attr_set, attr_id, -+ attr_name, &local_attr_id); -+ if (rc == pcmk_ok && BE_QUIET == FALSE) { -+ printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", -+ uber_parent(rsc)->id, attr_name, local_attr_id); -+ printf(" Delete '%s' first or use --force to override\n", local_attr_id); -+ } -+ free(local_attr_id); -+ if (rc == pcmk_ok) { -+ return -ENOTUNIQ; -+ } -+ } -+ -+ } else { -+ rsc = find_matching_attr_resource(rsc, rsc_id, attr_set, attr_id, attr_name, cib, "update"); -+ } -+ - lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ - rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, - &local_attr_id); -@@ -401,25 +419,8 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch - return -ENXIO; - } - -- if(rsc->parent && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { -- -- switch(rsc->parent->variant) { -- case pe_group: -- if (BE_QUIET == FALSE) { -- printf("Removing '%s' for '%s' will not apply to its peers in '%s'\n", attr_name, rsc_id, rsc->parent->id); -- } -- break; -- case pe_master: -- case pe_clone: -- rsc = rsc->parent; -- if (BE_QUIET == FALSE) { -- printf("Removing '%s' from '%s' for '%s'...\n", attr_name, rsc->id, rsc_id); -- } -- break; -- default: -- break; -- } -- -+ if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { -+ rsc = find_matching_attr_resource(rsc, rsc_id, attr_set, attr_id, attr_name, cib, "delete"); - } - - lookup_id = clone_strip(rsc->id); diff --git a/SOURCES/0038-Fix-crm_resource-Correctly-observe-force-when-deleti.patch b/SOURCES/0038-Fix-crm_resource-Correctly-observe-force-when-deleti.patch deleted file mode 100644 index f5aaaea..0000000 --- a/SOURCES/0038-Fix-crm_resource-Correctly-observe-force-when-deleti.patch +++ /dev/null @@ -1,75 +0,0 @@ -From: Andrew Beekhof -Date: Thu, 8 Oct 2015 13:38:07 +1100 -Subject: [PATCH] Fix: crm_resource: Correctly observe --force when deleting - and updating attributes - -(cherry picked from commit bd232e36403ea807635cabd336d8bb3101710891) ---- - tools/crm_resource_runtime.c | 25 +++++++++++++++++++++---- - 1 file changed, 21 insertions(+), 4 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 2d51e88..c3f5275 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -123,8 +123,9 @@ find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const cha - xmlNode *xml_search = NULL; - char *xpath_string = NULL; - -- CRM_ASSERT(value != NULL); -- *value = NULL; -+ if(value) { -+ *value = NULL; -+ } - - if(the_cib == NULL) { - return -ENOTCONN; -@@ -176,7 +177,7 @@ 
find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const cha - crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child)); - } - -- } else { -+ } else if(value) { - const char *tmp = crm_element_value(xml_search, attr); - - if (tmp) { -@@ -198,8 +199,10 @@ find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * - char *lookup_id = NULL; - char *local_attr_id = NULL; - -- if(rsc->parent && do_force == FALSE) { -+ if(do_force == TRUE) { -+ return rsc; - -+ } else if(rsc->parent) { - switch(rsc->parent->variant) { - case pe_group: - if (BE_QUIET == FALSE) { -@@ -270,6 +273,13 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - return -ENXIO; - } - -+ if(attr_id == NULL -+ && do_force == FALSE -+ && pcmk_ok != find_resource_attr( -+ cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL)) { -+ printf("\n"); -+ } -+ - if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { - if (do_force == FALSE) { - rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, -@@ -419,6 +429,13 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch - return -ENXIO; - } - -+ if(attr_id == NULL -+ && do_force == FALSE -+ && find_resource_attr( -+ cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) != pcmk_ok) { -+ printf("\n"); -+ } -+ - if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { - rsc = find_matching_attr_resource(rsc, rsc_id, attr_set, attr_id, attr_name, cib, "delete"); - } diff --git a/SOURCES/0039-prevent-segfault-when-logging.patch b/SOURCES/0039-prevent-segfault-when-logging.patch deleted file mode 100644 index 5764033..0000000 --- a/SOURCES/0039-prevent-segfault-when-logging.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 6c495a49d444404d0ed3fe910ace58befd2db8dc Mon Sep 17 00:00:00 2001 -From: Andrew Beekhof -Date: Wed, 18 Nov 2015 08:48:57 +1100 -Subject: [PATCH] Fix: systemd: Prevent segfault when logging failed operations - ---- - lib/services/systemd.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/lib/services/systemd.c b/lib/services/systemd.c -index 3d5a600..a851bc6 100644 ---- a/lib/services/systemd.c -+++ b/lib/services/systemd.c -@@ -442,7 +442,7 @@ systemd_exec_result(DBusMessage *reply, svc_action_t *op) - - /* ignore "already started" or "not running" errors */ - if (!systemd_mask_error(op, error.name)) { -- crm_err("Could not issue %s for %s: %s (%s)", op->action, op->rsc, error.message); -+ crm_err("Could not issue %s for %s: %s", op->action, op->rsc, error.message); - } - - } else { --- -1.8.3.1 - diff --git a/SOURCES/004-bz1290592.patch b/SOURCES/004-bz1290592.patch new file mode 100644 index 0000000..0299a77 --- /dev/null +++ b/SOURCES/004-bz1290592.patch @@ -0,0 +1,1134 @@ +From f6ffb93edb68fc20d9fb6a1324bc724ecb131617 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 18 Jul 2016 14:55:12 -0500 +Subject: [PATCH 1/4] Feature: libpengine: allow pe_order_same_node option for + constraints + +With this option, a constraint between two actions applies only if they are +scheduled on the same node.
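
Patch 2/4 in this series is where the new flag earns its keep: when a resource needs unfencing, its stop is ordered before each node's unfence action and its start after it, per instance. Stripped of the surrounding loop and comments, the two orderings look like this (custom_action_order(), pe_fence_op() and stop_key()/start_key() are the existing pengine helpers, copied from the native.c hunk below):

/* pe_order_same_node makes each ordering vanish unless both actions
 * really land on the node being unfenced. */
action_t *unfence = pe_fence_op(node, "on", TRUE, data_set);

custom_action_order(rsc, stop_key(rsc), NULL,
                    NULL, strdup(unfence->uuid), unfence,
                    pe_order_optional|pe_order_same_node, data_set);

custom_action_order(NULL, strdup(unfence->uuid), unfence,
                    rsc, start_key(rsc), NULL,
                    pe_order_implies_then_on_node|pe_order_same_node,
                    data_set);
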
+--- + include/crm/pengine/status.h | 1 + + pengine/graph.c | 11 +++++++++++ + 2 files changed, 12 insertions(+) + +diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h +index d9f2ca5..94aa832 100644 +--- a/include/crm/pengine/status.h ++++ b/include/crm/pengine/status.h +@@ -397,6 +397,7 @@ enum pe_ordering { + pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */ + pe_order_stonith_stop = 0x2000, /* only applies if the action is non-pseudo */ + pe_order_serialize_only = 0x4000, /* serialize */ ++ pe_order_same_node = 0x8000, /* applies only if 'first' and 'then' are on same node */ + + pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */ + pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */ +diff --git a/pengine/graph.c b/pengine/graph.c +index 0b7252d..9bc6731 100644 +--- a/pengine/graph.c ++++ b/pengine/graph.c +@@ -509,6 +509,17 @@ update_action(action_t * then) + } + } + ++ /* Disable constraint if it only applies when on same node, but isn't */ ++ if (is_set(other->type, pe_order_same_node) ++ && (first_node->details != then_node->details)) { ++ ++ crm_trace("Disabled constraint %s on %s -> %s on %s", ++ other->action->uuid, first_node->details->uname, ++ then->uuid, then_node->details->uname); ++ other->type = pe_order_none; ++ continue; ++ } ++ + clear_bit(changed, pe_graph_updated_first); + + if (first->rsc != then->rsc +-- +1.8.3.1 + + +From 48622e7462f8a9bbb94d9cc925133f3afaa52629 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Mon, 18 Jul 2016 16:25:56 -0500 +Subject: [PATCH 2/4] Fix: pengine: avoid transition loop for start-then-stop + + unfencing + +Partial fix +--- + include/crm/pengine/status.h | 2 +- + pengine/native.c | 37 ++++++++++++++++++++++++------------- + 2 files changed, 25 insertions(+), 14 deletions(-) + +diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h +index 94aa832..c376c73 100644 +--- a/include/crm/pengine/status.h ++++ b/include/crm/pengine/status.h +@@ -206,7 +206,7 @@ struct node_s { + # define pe_rsc_needs_quorum 0x10000000ULL + # define pe_rsc_needs_fencing 0x20000000ULL + # define pe_rsc_needs_unfencing 0x40000000ULL +-# define pe_rsc_have_unfencing 0x80000000ULL ++# define pe_rsc_have_unfencing 0x80000000ULL /* obsolete (not set or used by cluster) */ + + enum pe_graph_flags { + pe_graph_none = 0x00000, +diff --git a/pengine/native.c b/pengine/native.c +index 9f659ef..9d9a2da 100644 +--- a/pengine/native.c ++++ b/pengine/native.c +@@ -1342,30 +1342,41 @@ native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) + + if (is_stonith == FALSE + && is_set(data_set->flags, pe_flag_enable_unfencing) +- && is_set(rsc->flags, pe_rsc_needs_unfencing) +- && is_not_set(rsc->flags, pe_rsc_have_unfencing)) { ++ && is_set(rsc->flags, pe_rsc_needs_unfencing)) { + /* Check if the node needs to be unfenced first */ + node_t *node = NULL; + GHashTableIter iter; + +- if(rsc != top) { +- /* Only create these constraints once, rsc is almost certainly cloned */ +- set_bit_recursive(top, pe_rsc_have_unfencing); +- } +- + g_hash_table_iter_init(&iter, rsc->allowed_nodes); + while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { + action_t *unfence = pe_fence_op(node, "on", TRUE, data_set); + +- custom_action_order(top, generate_op_key(top->id, top == rsc?RSC_STOP:RSC_STOPPED, 0), NULL, +- NULL, strdup(unfence->uuid), unfence, +- 
pe_order_optional, data_set); ++ crm_debug("Ordering any stops of %s before %s, and any starts after", ++ rsc->id, unfence->uuid); + +- crm_debug("Stopping %s prior to unfencing %s", top->id, unfence->uuid); ++ /* ++ * It would be more efficient to order clone resources once, ++ * rather than order each instance, but ordering the instance ++ * allows us to avoid unnecessary dependencies that might conflict ++ * with user constraints. ++ * ++ * @TODO: This constraint can still produce a transition loop if the ++ * resource has a stop scheduled on the node being unfenced, and ++ * there is a user ordering constraint to start some other resource ++ * (which will be ordered after the unfence) before stopping this ++ * resource. An example is "start some slow-starting cloned service ++ * before stopping an associated virtual IP that may be moving to ++ * it": ++ * stop this -> unfencing -> start that -> stop this ++ */ ++ custom_action_order(rsc, stop_key(rsc), NULL, ++ NULL, strdup(unfence->uuid), unfence, ++ pe_order_optional|pe_order_same_node, data_set); + + custom_action_order(NULL, strdup(unfence->uuid), unfence, +- top, generate_op_key(top->id, RSC_START, 0), NULL, +- pe_order_implies_then_on_node, data_set); ++ rsc, start_key(rsc), NULL, ++ pe_order_implies_then_on_node|pe_order_same_node, ++ data_set); + } + } + +-- +1.8.3.1 + + +From 1122b1866f496124b346b75ff955be240553d28c Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 19 Jul 2016 15:10:27 -0500 +Subject: [PATCH 3/4] Test: pengine: update regression tests for new unfence + ordering + +--- + pengine/test10/unfence-definition.dot | 14 ++++----- + pengine/test10/unfence-definition.exp | 34 +++++++++------------ + pengine/test10/unfence-definition.summary | 12 ++++---- + pengine/test10/unfence-parameters.dot | 21 ++++++------- + pengine/test10/unfence-parameters.exp | 51 +++++++++++++------------------ + pengine/test10/unfence-parameters.summary | 14 ++++----- + pengine/test10/unfence-startup.dot | 4 +-- + pengine/test10/unfence-startup.exp | 12 ++++---- + pengine/test10/unfence-startup.summary | 2 +- + 9 files changed, 72 insertions(+), 92 deletions(-) + +diff --git a/pengine/test10/unfence-definition.dot b/pengine/test10/unfence-definition.dot +index 1ae2367..a9e7e6b 100644 +--- a/pengine/test10/unfence-definition.dot ++++ b/pengine/test10/unfence-definition.dot +@@ -12,8 +12,6 @@ digraph "g" { + "clvmd-clone_stop_0" [ style=bold color="green" fontcolor="orange"] + "clvmd-clone_stopped_0" -> "clvmd-clone_start_0" [ style = bold] + "clvmd-clone_stopped_0" -> "dlm-clone_stop_0" [ style = bold] +-"clvmd-clone_stopped_0" -> "stonith 'on' virt-1" [ style = bold] +-"clvmd-clone_stopped_0" -> "stonith 'on' virt-3" [ style = bold] + "clvmd-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] + "clvmd:1_monitor_0 virt-2" -> "clvmd-clone_start_0" [ style = bold] + "clvmd:1_monitor_0 virt-2" [ style=bold color="green" fontcolor="black"] +@@ -32,6 +30,7 @@ digraph "g" { + "clvmd_stop_0 virt-1" -> "clvmd-clone_stopped_0" [ style = bold] + "clvmd_stop_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold] + "clvmd_stop_0 virt-1" -> "dlm_stop_0 virt-1" [ style = bold] ++"clvmd_stop_0 virt-1" -> "stonith 'on' virt-1" [ style = bold] + "clvmd_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] + "dlm-clone_running_0" -> "clvmd-clone_start_0" [ style = bold] + "dlm-clone_running_0" [ style=bold color="green" fontcolor="orange"] +@@ -43,8 +42,6 @@ digraph "g" { + "dlm-clone_stop_0" -> "dlm_stop_0 virt-1" [ style = bold] + 
"dlm-clone_stop_0" [ style=bold color="green" fontcolor="orange"] + "dlm-clone_stopped_0" -> "dlm-clone_start_0" [ style = bold] +-"dlm-clone_stopped_0" -> "stonith 'on' virt-1" [ style = bold] +-"dlm-clone_stopped_0" -> "stonith 'on' virt-3" [ style = bold] + "dlm-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] + "dlm:2_monitor_0 virt-3" -> "dlm-clone_start_0" [ style = bold] + "dlm:2_monitor_0 virt-3" -> "stonith 'on' virt-3" [ style = bold] +@@ -58,6 +55,7 @@ digraph "g" { + "dlm_stop_0 virt-1" -> "all_stopped" [ style = bold] + "dlm_stop_0 virt-1" -> "dlm-clone_stopped_0" [ style = bold] + "dlm_stop_0 virt-1" -> "dlm_start_0 virt-1" [ style = bold] ++"dlm_stop_0 virt-1" -> "stonith 'on' virt-1" [ style = bold] + "dlm_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] + "fencing_delete_0 virt-1" -> "fencing_start_0 virt-1" [ style = bold] + "fencing_delete_0 virt-1" [ style=bold color="green" fontcolor="black"] +@@ -69,11 +67,11 @@ digraph "g" { + "fencing_stop_0 virt-1" -> "fencing_delete_0 virt-1" [ style = bold] + "fencing_stop_0 virt-1" -> "fencing_start_0 virt-1" [ style = bold] + "fencing_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] +-"stonith 'on' virt-1" -> "clvmd-clone_start_0" [ style = bold] +-"stonith 'on' virt-1" -> "dlm-clone_start_0" [ style = bold] ++"stonith 'on' virt-1" -> "clvmd_start_0 virt-1" [ style = bold] ++"stonith 'on' virt-1" -> "dlm_start_0 virt-1" [ style = bold] + "stonith 'on' virt-1" [ style=bold color="green" fontcolor="black"] +-"stonith 'on' virt-3" -> "clvmd-clone_start_0" [ style = bold] +-"stonith 'on' virt-3" -> "dlm-clone_start_0" [ style = bold] ++"stonith 'on' virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] ++"stonith 'on' virt-3" -> "dlm:2_start_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "fencing_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" [ style=bold color="green" fontcolor="black"] + "stonith 'reboot' virt-4" -> "stonith_complete" [ style = bold] +diff --git a/pengine/test10/unfence-definition.exp b/pengine/test10/unfence-definition.exp +index 9d23a2a..64b9735 100644 +--- a/pengine/test10/unfence-definition.exp ++++ b/pengine/test10/unfence-definition.exp +@@ -69,6 +69,9 @@ + + + ++ ++ ++ + + + +@@ -104,6 +107,9 @@ + + + ++ ++ ++ + + + +@@ -173,12 +179,6 @@ + + + +- +- +- +- +- +- + + + +@@ -195,6 +195,9 @@ + + + ++ ++ ++ + + + +@@ -258,6 +261,9 @@ + + + ++ ++ ++ + + + +@@ -332,12 +338,6 @@ + + + +- +- +- +- +- +- + + + +@@ -387,12 +387,6 @@ + + + +- +- +- +- +- +- + + + +@@ -403,10 +397,10 @@ + + + +- ++ + + +- ++ + + + +diff --git a/pengine/test10/unfence-definition.summary b/pengine/test10/unfence-definition.summary +index a317807..05f8003 100644 +--- a/pengine/test10/unfence-definition.summary ++++ b/pengine/test10/unfence-definition.summary +@@ -26,23 +26,23 @@ Executing cluster transition: + * Pseudo action: clvmd-clone_stop_0 + * Fencing virt-4 (reboot) + * Pseudo action: stonith_complete ++ * Fencing virt-3 (on) ++ * Resource action: fencing monitor on virt-3 ++ * Resource action: fencing stop on virt-1 + * Resource action: clvmd stop on virt-1 + * Pseudo action: clvmd-clone_stopped_0 ++ * Resource action: fencing delete on virt-1 + * Pseudo action: dlm-clone_stop_0 + * Resource action: dlm stop on virt-1 + * Pseudo action: dlm-clone_stopped_0 +- * Fencing virt-3 (on) +- * Fencing virt-1 (on) +- * Resource action: fencing monitor on virt-3 +- * Resource action: fencing stop on virt-1 + * Pseudo action: dlm-clone_start_0 ++ * Fencing virt-1 (on) + * Pseudo action: 
all_stopped +- * Resource action: fencing delete on virt-1 ++ * Resource action: fencing start on virt-1 + * Resource action: dlm start on virt-1 + * Resource action: dlm start on virt-3 + * Pseudo action: dlm-clone_running_0 + * Pseudo action: clvmd-clone_start_0 +- * Resource action: fencing start on virt-1 + * Resource action: clvmd start on virt-1 + * Resource action: clvmd start on virt-2 + * Resource action: clvmd start on virt-3 +diff --git a/pengine/test10/unfence-parameters.dot b/pengine/test10/unfence-parameters.dot +index 6b23965..c96d314 100644 +--- a/pengine/test10/unfence-parameters.dot ++++ b/pengine/test10/unfence-parameters.dot +@@ -12,9 +12,6 @@ digraph "g" { + "clvmd-clone_stop_0" [ style=bold color="green" fontcolor="orange"] + "clvmd-clone_stopped_0" -> "clvmd-clone_start_0" [ style = bold] + "clvmd-clone_stopped_0" -> "dlm-clone_stop_0" [ style = bold] +-"clvmd-clone_stopped_0" -> "stonith 'on' virt-1" [ style = bold] +-"clvmd-clone_stopped_0" -> "stonith 'on' virt-2" [ style = bold] +-"clvmd-clone_stopped_0" -> "stonith 'on' virt-3" [ style = bold] + "clvmd-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] + "clvmd:1_monitor_0 virt-2" -> "clvmd-clone_start_0" [ style = bold] + "clvmd:1_monitor_0 virt-2" -> "stonith 'on' virt-2" [ style = bold] +@@ -37,6 +34,7 @@ digraph "g" { + "clvmd_stop_0 virt-1" -> "clvmd-clone_stopped_0" [ style = bold] + "clvmd_stop_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold] + "clvmd_stop_0 virt-1" -> "dlm_stop_0 virt-1" [ style = bold] ++"clvmd_stop_0 virt-1" -> "stonith 'on' virt-1" [ style = bold] + "clvmd_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] + "dlm-clone_running_0" -> "clvmd-clone_start_0" [ style = bold] + "dlm-clone_running_0" [ style=bold color="green" fontcolor="orange"] +@@ -50,9 +48,6 @@ digraph "g" { + "dlm-clone_stop_0" -> "dlm_stop_0 virt-2" [ style = bold] + "dlm-clone_stop_0" [ style=bold color="green" fontcolor="orange"] + "dlm-clone_stopped_0" -> "dlm-clone_start_0" [ style = bold] +-"dlm-clone_stopped_0" -> "stonith 'on' virt-1" [ style = bold] +-"dlm-clone_stopped_0" -> "stonith 'on' virt-2" [ style = bold] +-"dlm-clone_stopped_0" -> "stonith 'on' virt-3" [ style = bold] + "dlm-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] + "dlm:2_monitor_0 virt-3" -> "dlm-clone_start_0" [ style = bold] + "dlm:2_monitor_0 virt-3" -> "stonith 'on' virt-3" [ style = bold] +@@ -72,11 +67,13 @@ digraph "g" { + "dlm_stop_0 virt-1" -> "all_stopped" [ style = bold] + "dlm_stop_0 virt-1" -> "dlm-clone_stopped_0" [ style = bold] + "dlm_stop_0 virt-1" -> "dlm_start_0 virt-1" [ style = bold] ++"dlm_stop_0 virt-1" -> "stonith 'on' virt-1" [ style = bold] + "dlm_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] + "dlm_stop_0 virt-2" -> "all_stopped" [ style = bold] + "dlm_stop_0 virt-2" -> "dlm-clone_stopped_0" [ style = bold] + "dlm_stop_0 virt-2" -> "dlm_start_0 virt-2" [ style = bold] + "dlm_stop_0 virt-2" -> "dlm_stop_0 virt-1" [ style = bold] ++"dlm_stop_0 virt-2" -> "stonith 'on' virt-2" [ style = bold] + "dlm_stop_0 virt-2" [ style=bold color="green" fontcolor="black"] + "fencing_monitor_0 virt-3" -> "fencing_start_0 virt-1" [ style = bold] + "fencing_monitor_0 virt-3" -> "fencing_stop_0 virt-1" [ style = bold] +@@ -85,14 +82,14 @@ digraph "g" { + "fencing_stop_0 virt-1" -> "all_stopped" [ style = bold] + "fencing_stop_0 virt-1" -> "fencing_start_0 virt-1" [ style = bold] + "fencing_stop_0 virt-1" [ style=bold color="green" fontcolor="black"] +-"stonith 'on' virt-1" -> 
"clvmd-clone_start_0" [ style = bold] +-"stonith 'on' virt-1" -> "dlm-clone_start_0" [ style = bold] ++"stonith 'on' virt-1" -> "clvmd_start_0 virt-1" [ style = bold] ++"stonith 'on' virt-1" -> "dlm_start_0 virt-1" [ style = bold] + "stonith 'on' virt-1" [ style=bold color="green" fontcolor="black"] +-"stonith 'on' virt-2" -> "clvmd-clone_start_0" [ style = bold] +-"stonith 'on' virt-2" -> "dlm-clone_start_0" [ style = bold] ++"stonith 'on' virt-2" -> "clvmd:1_start_0 virt-2" [ style = bold] ++"stonith 'on' virt-2" -> "dlm_start_0 virt-2" [ style = bold] + "stonith 'on' virt-2" [ style=bold color="green" fontcolor="black"] +-"stonith 'on' virt-3" -> "clvmd-clone_start_0" [ style = bold] +-"stonith 'on' virt-3" -> "dlm-clone_start_0" [ style = bold] ++"stonith 'on' virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] ++"stonith 'on' virt-3" -> "dlm:2_start_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "fencing_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" [ style=bold color="green" fontcolor="black"] + "stonith 'reboot' virt-4" -> "stonith_complete" [ style = bold] +diff --git a/pengine/test10/unfence-parameters.exp b/pengine/test10/unfence-parameters.exp +index 48ef3bc..16aa30d 100644 +--- a/pengine/test10/unfence-parameters.exp ++++ b/pengine/test10/unfence-parameters.exp +@@ -53,6 +53,9 @@ + + + ++ ++ ++ + + + +@@ -91,6 +94,9 @@ + + + ++ ++ ++ + + + +@@ -126,6 +132,9 @@ + + + ++ ++ ++ + + + +@@ -204,15 +213,6 @@ + + + +- +- +- +- +- +- +- +- +- + + + +@@ -229,6 +229,9 @@ + + + ++ ++ ++ + + + +@@ -264,6 +267,9 @@ + + + ++ ++ ++ + + + +@@ -295,6 +301,9 @@ + + + ++ ++ ++ + + + +@@ -369,15 +378,6 @@ + + + +- +- +- +- +- +- +- +- +- + + + +@@ -427,12 +427,6 @@ + + + +- +- +- +- +- +- + + + +@@ -446,10 +440,7 @@ + + + +- +- +- +- ++ + + + +@@ -461,10 +452,10 @@ + + + +- ++ + + +- ++ + + + +diff --git a/pengine/test10/unfence-parameters.summary b/pengine/test10/unfence-parameters.summary +index bca4f96..41fed90 100644 +--- a/pengine/test10/unfence-parameters.summary ++++ b/pengine/test10/unfence-parameters.summary +@@ -27,25 +27,25 @@ Executing cluster transition: + * Pseudo action: clvmd-clone_stop_0 + * Fencing virt-4 (reboot) + * Pseudo action: stonith_complete ++ * Fencing virt-3 (on) ++ * Resource action: fencing monitor on virt-3 + * Resource action: clvmd stop on virt-1 + * Pseudo action: clvmd-clone_stopped_0 ++ * Resource action: fencing stop on virt-1 + * Pseudo action: dlm-clone_stop_0 + * Resource action: dlm stop on virt-2 ++ * Fencing virt-2 (on) + * Resource action: dlm stop on virt-1 + * Pseudo action: dlm-clone_stopped_0 +- * Fencing virt-3 (on) +- * Fencing virt-2 (on) +- * Fencing virt-1 (on) +- * Resource action: fencing monitor on virt-3 + * Pseudo action: dlm-clone_start_0 +- * Resource action: fencing stop on virt-1 ++ * Fencing virt-1 (on) ++ * Pseudo action: all_stopped ++ * Resource action: fencing start on virt-1 + * Resource action: dlm start on virt-1 + * Resource action: dlm start on virt-2 + * Resource action: dlm start on virt-3 + * Pseudo action: dlm-clone_running_0 + * Pseudo action: clvmd-clone_start_0 +- * Pseudo action: all_stopped +- * Resource action: fencing start on virt-1 + * Resource action: clvmd start on virt-1 + * Resource action: clvmd start on virt-2 + * Resource action: clvmd start on virt-3 +diff --git a/pengine/test10/unfence-startup.dot b/pengine/test10/unfence-startup.dot +index 97eba4a..20f1367 100644 +--- a/pengine/test10/unfence-startup.dot ++++ b/pengine/test10/unfence-startup.dot +@@ -27,8 +27,8 @@ digraph "g" { + 
"dlm:2_start_0 virt-3" -> "dlm-clone_running_0" [ style = bold] + "dlm:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"] + "fencing_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"] +-"stonith 'on' virt-3" -> "clvmd-clone_start_0" [ style = bold] +-"stonith 'on' virt-3" -> "dlm-clone_start_0" [ style = bold] ++"stonith 'on' virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold] ++"stonith 'on' virt-3" -> "dlm:2_start_0 virt-3" [ style = bold] + "stonith 'on' virt-3" -> "fencing_monitor_0 virt-3" [ style = bold] + "stonith 'on' virt-3" [ style=bold color="green" fontcolor="black"] + "stonith 'reboot' virt-4" -> "stonith_complete" [ style = bold] +diff --git a/pengine/test10/unfence-startup.exp b/pengine/test10/unfence-startup.exp +index 7595cf3..569fd12 100644 +--- a/pengine/test10/unfence-startup.exp ++++ b/pengine/test10/unfence-startup.exp +@@ -21,6 +21,9 @@ + + + ++ ++ ++ + + + +@@ -60,9 +63,6 @@ + + + +- +- +- + + + +@@ -101,6 +101,9 @@ + + + ++ ++ ++ + + + +@@ -149,9 +152,6 @@ + + + +- +- +- + + + +diff --git a/pengine/test10/unfence-startup.summary b/pengine/test10/unfence-startup.summary +index db0f307..76bc0fc 100644 +--- a/pengine/test10/unfence-startup.summary ++++ b/pengine/test10/unfence-startup.summary +@@ -18,6 +18,7 @@ Transition Summary: + + Executing cluster transition: + * Resource action: dlm monitor on virt-3 ++ * Pseudo action: dlm-clone_start_0 + * Resource action: clvmd monitor on virt-2 + * Resource action: clvmd monitor on virt-3 + * Fencing virt-4 (reboot) +@@ -25,7 +26,6 @@ Executing cluster transition: + * Fencing virt-3 (on) + * Pseudo action: all_stopped + * Resource action: fencing monitor on virt-3 +- * Pseudo action: dlm-clone_start_0 + * Resource action: dlm start on virt-3 + * Pseudo action: dlm-clone_running_0 + * Pseudo action: clvmd-clone_start_0 +-- +1.8.3.1 + + +From 4a95897ab5b668f35a64e5d3818046adcafd3897 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 16 Jun 2016 13:49:40 -0500 +Subject: [PATCH 4/4] Test: pengine: add regression test for start-then-stop + + unfencing + +--- + pengine/regression.sh | 1 + + pengine/test10/start-then-stop-with-unfence.dot | 29 ++++ + pengine/test10/start-then-stop-with-unfence.exp | 168 +++++++++++++++++++++ + pengine/test10/start-then-stop-with-unfence.scores | 19 +++ + .../test10/start-then-stop-with-unfence.summary | 42 ++++++ + pengine/test10/start-then-stop-with-unfence.xml | 149 ++++++++++++++++++ + 6 files changed, 408 insertions(+) + create mode 100644 pengine/test10/start-then-stop-with-unfence.dot + create mode 100644 pengine/test10/start-then-stop-with-unfence.exp + create mode 100644 pengine/test10/start-then-stop-with-unfence.scores + create mode 100644 pengine/test10/start-then-stop-with-unfence.summary + create mode 100644 pengine/test10/start-then-stop-with-unfence.xml + +diff --git a/pengine/regression.sh b/pengine/regression.sh +index f86d0f1..22ad3bf 100755 +--- a/pengine/regression.sh ++++ b/pengine/regression.sh +@@ -534,6 +534,7 @@ do_test bug-5069-op-disabled "Test on-fail-ignore with failure when monitor is d + do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections" + do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block" + do_test asymmetrical-order-move "Respect asymmetrical ordering when trying to move resources" ++do_test start-then-stop-with-unfence "Avoid graph loop with start-then-stop constraint plus unfencing" + + do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order 
constraint between stonith and non-stonith rsc." + do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith." +diff --git a/pengine/test10/start-then-stop-with-unfence.dot b/pengine/test10/start-then-stop-with-unfence.dot +new file mode 100644 +index 0000000..d8f7a71 +--- /dev/null ++++ b/pengine/test10/start-then-stop-with-unfence.dot +@@ -0,0 +1,29 @@ ++digraph "g" { ++"all_stopped" [ style=bold color="green" fontcolor="orange"] ++"ip1_monitor_10000 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"ip1_start_0 rhel7-node1.example.com" -> "ip1_monitor_10000 rhel7-node1.example.com" [ style = bold] ++"ip1_start_0 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"ip1_stop_0 rhel7-node2.example.com" -> "all_stopped" [ style = bold] ++"ip1_stop_0 rhel7-node2.example.com" -> "ip1_start_0 rhel7-node1.example.com" [ style = bold] ++"ip1_stop_0 rhel7-node2.example.com" [ style=bold color="green" fontcolor="black"] ++"jrummy-clone_running_0" -> "ip1_stop_0 rhel7-node2.example.com" [ style = bold] ++"jrummy-clone_running_0" [ style=bold color="green" fontcolor="orange"] ++"jrummy-clone_start_0" -> "jrummy-clone_running_0" [ style = bold] ++"jrummy-clone_start_0" -> "jrummy_start_0 rhel7-node1.example.com" [ style = bold] ++"jrummy-clone_start_0" [ style=bold color="green" fontcolor="orange"] ++"jrummy_monitor_10000 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"jrummy_start_0 rhel7-node1.example.com" -> "jrummy-clone_running_0" [ style = bold] ++"jrummy_start_0 rhel7-node1.example.com" -> "jrummy_monitor_10000 rhel7-node1.example.com" [ style = bold] ++"jrummy_start_0 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"mpath-node1_monitor_0 rhel7-node1.example.com" -> "mpath-node1_start_0 rhel7-node1.example.com" [ style = bold] ++"mpath-node1_monitor_0 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"mpath-node1_monitor_60000 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"mpath-node1_start_0 rhel7-node1.example.com" -> "mpath-node1_monitor_60000 rhel7-node1.example.com" [ style = bold] ++"mpath-node1_start_0 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"mpath-node2_monitor_0 rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++"stonith 'on' rhel7-node1.example.com" -> "ip1_start_0 rhel7-node1.example.com" [ style = bold] ++"stonith 'on' rhel7-node1.example.com" -> "jrummy_start_0 rhel7-node1.example.com" [ style = bold] ++"stonith 'on' rhel7-node1.example.com" -> "mpath-node1_monitor_0 rhel7-node1.example.com" [ style = bold] ++"stonith 'on' rhel7-node1.example.com" -> "mpath-node2_monitor_0 rhel7-node1.example.com" [ style = bold] ++"stonith 'on' rhel7-node1.example.com" [ style=bold color="green" fontcolor="black"] ++} +diff --git a/pengine/test10/start-then-stop-with-unfence.exp b/pengine/test10/start-then-stop-with-unfence.exp +new file mode 100644 +index 0000000..aeee1fc +--- /dev/null ++++ b/pengine/test10/start-then-stop-with-unfence.exp +@@ -0,0 +1,168 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ 
++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +diff --git a/pengine/test10/start-then-stop-with-unfence.scores b/pengine/test10/start-then-stop-with-unfence.scores +new file mode 100644 +index 0000000..d353bef +--- /dev/null ++++ b/pengine/test10/start-then-stop-with-unfence.scores +@@ -0,0 +1,19 @@ ++Allocation scores: ++clone_color: jrummy-clone allocation score on rhel7-node1.example.com: 500 ++clone_color: jrummy-clone allocation score on rhel7-node2.example.com: 500 ++clone_color: jrummy:0 allocation score on rhel7-node1.example.com: 0 ++clone_color: jrummy:0 allocation score on rhel7-node2.example.com: 1 ++clone_color: jrummy:1 allocation score on rhel7-node1.example.com: 0 ++clone_color: jrummy:1 allocation score on rhel7-node2.example.com: 0 ++native_color: ip1 allocation score on rhel7-node1.example.com: 500 ++native_color: ip1 allocation score on rhel7-node2.example.com: 0 ++native_color: ip2 allocation score on rhel7-node1.example.com: 0 ++native_color: ip2 allocation score on rhel7-node2.example.com: 500 ++native_color: jrummy:0 allocation score on rhel7-node1.example.com: 0 ++native_color: jrummy:0 allocation score on rhel7-node2.example.com: 1 ++native_color: jrummy:1 allocation score on rhel7-node1.example.com: 0 ++native_color: jrummy:1 allocation score on rhel7-node2.example.com: -INFINITY ++native_color: mpath-node1 allocation score on rhel7-node1.example.com: 0 ++native_color: mpath-node1 allocation score on rhel7-node2.example.com: 0 ++native_color: mpath-node2 allocation score on rhel7-node1.example.com: 0 ++native_color: mpath-node2 allocation score on rhel7-node2.example.com: 0 +diff --git a/pengine/test10/start-then-stop-with-unfence.summary b/pengine/test10/start-then-stop-with-unfence.summary +new file mode 100644 +index 0000000..df7d9e3 +--- /dev/null ++++ b/pengine/test10/start-then-stop-with-unfence.summary +@@ -0,0 +1,42 @@ ++ ++Current cluster status: ++Online: [ rhel7-node1.example.com rhel7-node2.example.com ] ++ ++ mpath-node2 (stonith:fence_mpath): Started rhel7-node2.example.com ++ mpath-node1 (stonith:fence_mpath): Stopped ++ ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com ++ ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com ++ Clone Set: jrummy-clone [jrummy] ++ Started: [ rhel7-node2.example.com ] ++ Stopped: [ rhel7-node1.example.com ] ++ ++Transition Summary: ++ * Start mpath-node1 (rhel7-node1.example.com) ++ * Move ip1 (Started rhel7-node2.example.com -> rhel7-node1.example.com) ++ * Start jrummy:1 (rhel7-node1.example.com) ++ ++Executing cluster transition: ++ * Pseudo action: jrummy-clone_start_0 ++ * Fencing rhel7-node1.example.com (on) ++ * Resource action: mpath-node2 monitor on rhel7-node1.example.com ++ * Resource action: mpath-node1 monitor on rhel7-node1.example.com ++ * Resource action: jrummy start on rhel7-node1.example.com ++ * Pseudo action: jrummy-clone_running_0 ++ * Resource action: mpath-node1 start on rhel7-node1.example.com ++ * Resource action: ip1 stop on rhel7-node2.example.com ++ * Resource action: jrummy monitor=10000 on rhel7-node1.example.com ++ * Pseudo action: all_stopped ++ * Resource action: mpath-node1 monitor=60000 on rhel7-node1.example.com ++ * Resource action: ip1 start on rhel7-node1.example.com ++ * Resource action: ip1 monitor=10000 on rhel7-node1.example.com ++ ++Revised cluster status: ++Online: [ rhel7-node1.example.com rhel7-node2.example.com ] ++ ++ mpath-node2 (stonith:fence_mpath): Started 
rhel7-node2.example.com ++ mpath-node1 (stonith:fence_mpath): Started rhel7-node1.example.com ++ ip1 (ocf::heartbeat:IPaddr2): Started rhel7-node1.example.com ++ ip2 (ocf::heartbeat:IPaddr2): Started rhel7-node2.example.com ++ Clone Set: jrummy-clone [jrummy] ++ Started: [ rhel7-node1.example.com rhel7-node2.example.com ] ++ +diff --git a/pengine/test10/start-then-stop-with-unfence.xml b/pengine/test10/start-then-stop-with-unfence.xml +new file mode 100644 +index 0000000..499022e +--- /dev/null ++++ b/pengine/test10/start-then-stop-with-unfence.xml +@@ -0,0 +1,149 @@ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ +-- +1.8.3.1 + diff --git a/SOURCES/0040-update-top-format-in-HealthCPU.patch b/SOURCES/0040-update-top-format-in-HealthCPU.patch deleted file mode 100644 index e4be29a..0000000 --- a/SOURCES/0040-update-top-format-in-HealthCPU.patch +++ /dev/null @@ -1,27 +0,0 @@ -From e8b884997f2d49871c6a19b36095a66e377f54e4 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 3 Dec 2015 15:29:14 -0600 -Subject: [PATCH] Fix: resources: allow for top output with or without percent - sign in HealthCPU - -Problem found and patch provided by Malcome Cowe . ---- - extra/resources/HealthCPU | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/extra/resources/HealthCPU b/extra/resources/HealthCPU -index d320320..1ceaa01 100644 ---- a/extra/resources/HealthCPU -+++ b/extra/resources/HealthCPU -@@ -136,7 +136,7 @@ dummy_monitor() { - - if [ -f ${OCF_RESKEY_state} ]; then - -- IDLE=`top -b -n2 | grep Cpu | tail -1 | awk -F",|\.[0-9]%id" '{ print $4 }'` -+ IDLE=`top -b -n2 | grep Cpu | tail -1 | awk -F",|.[0-9][ %]id" '{ print $4 }'` - # echo "System idle: " $IDLE - # echo "$OCF_RESKEY_red_limit" - # echo $OCF_RESKEY_yellow_limit --- -1.8.3.1 - diff --git a/SOURCES/0041-delete-fence-attributes-correctly.patch b/SOURCES/0041-delete-fence-attributes-correctly.patch deleted file mode 100644 index 06be3a6..0000000 --- a/SOURCES/0041-delete-fence-attributes-correctly.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 98e69e033835b3d4dfdc8c9cabacae28770725f1 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Wed, 9 Dec 2015 15:01:25 +0100 -Subject: [PATCH] Fix RHBZ#1287315: stonithd: Trigger cib_devices_update in - case of deletion of just an attribute - ---- - fencing/main.c | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/fencing/main.c b/fencing/main.c -index e9831f0..0dc4492 100644 ---- a/fencing/main.c -+++ b/fencing/main.c -@@ -796,8 +796,13 @@ update_cib_stonith_devices_v2(const char *event, xmlNode * msg) - } else if(safe_str_eq(op, "delete") && strstr(xpath, XML_CIB_TAG_RESOURCE)) { - const char *rsc_id = NULL; - char *search = NULL; -- char *mutable = strdup(xpath); -+ char *mutable = NULL; - -+ if (strstr(xpath, XML_TAG_ATTR_SETS)) { -+ needs_update = TRUE; -+ break; -+ } -+ mutable = strdup(xpath); - rsc_id = strstr(mutable, "primitive[@id=\'") + strlen("primitive[@id=\'"); - search = strchr(rsc_id, '\''); - search[0] = 0; --- -1.8.3.1 - diff --git a/SOURCES/0042-handle-systemd-shutdown.patch b/SOURCES/0042-handle-systemd-shutdown.patch deleted file mode 100644 
index 9b7a51c..0000000 --- a/SOURCES/0042-handle-systemd-shutdown.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 6aae8542abedc755b90c8c49aa5c429718fd12f1 Mon Sep 17 00:00:00 2001 -From: Klaus Wenninger -Date: Tue, 12 Jan 2016 15:46:26 +0100 -Subject: [PATCH] Fix RHBZ#1286316: Do an ordered shutdown of systemd resources - have lrmd wait till systemd actually starts bringing down systemd - resources instead of being confused if service is still active on first - status send a reload to systemd whenever a unitfile is changed instead of - doing this just with every 10th change - ---- - lib/services/systemd.c | 11 ++++------- - lrmd/lrmd.c | 2 ++ - 2 files changed, 6 insertions(+), 7 deletions(-) - -diff --git a/lib/services/systemd.c b/lib/services/systemd.c -index a851bc6..eb5f8aa 100644 ---- a/lib/services/systemd.c -+++ b/lib/services/systemd.c -@@ -150,16 +150,13 @@ systemd_daemon_reload(int timeout) - { - static unsigned int reload_count = 0; - const char *method = "Reload"; -- -+ DBusMessage *msg = systemd_new_method(BUS_NAME".Manager", method); - - reload_count++; -- if(reload_count % 10 == 0) { -- DBusMessage *msg = systemd_new_method(BUS_NAME".Manager", method); -+ CRM_ASSERT(msg != NULL); -+ pcmk_dbus_send(msg, systemd_proxy, systemd_daemon_reload_complete, GUINT_TO_POINTER(reload_count), timeout); -+ dbus_message_unref(msg); - -- CRM_ASSERT(msg != NULL); -- pcmk_dbus_send(msg, systemd_proxy, systemd_daemon_reload_complete, GUINT_TO_POINTER(reload_count), timeout); -- dbus_message_unref(msg); -- } - return TRUE; - } - -diff --git a/lrmd/lrmd.c b/lrmd/lrmd.c -index a64b430..518d5d1 100644 ---- a/lrmd/lrmd.c -+++ b/lrmd/lrmd.c -@@ -900,6 +900,8 @@ action_complete(svc_action_t * action) - /* Ok, so this is the follow up monitor action to check if start actually completed */ - if(cmd->lrmd_op_status == PCMK_LRM_OP_DONE && cmd->exec_rc == PCMK_OCF_PENDING) { - goagain = true; -+ } else if(cmd->exec_rc == PCMK_OCF_OK && safe_str_eq(cmd->real_action, "stop")) { -+ goagain = true; - - } else { - #ifdef HAVE_SYS_TIMEB_H --- -1.8.3.1 - diff --git a/SOURCES/0043-cts-fix-for-command-lines.patch b/SOURCES/0043-cts-fix-for-command-lines.patch deleted file mode 100644 index 120f87d..0000000 --- a/SOURCES/0043-cts-fix-for-command-lines.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 94ebc967f2e74301ef5e10ed102832168503c7d9 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Thu, 1 Oct 2015 12:00:26 -0500 -Subject: [PATCH] Test: CTS: get Reattach test working again and up-to-date - -Prevously, Reattach relied on command lines being logged, -which 8dae683 removed. Now, it doesn't. - -Previously, Reattach used the now-deprecated is-managed-default cluster option; -now, it uses the is-managed option in rsc_defaults. 
---- - cts/CTStests.py | 59 ++++++++++++++++++++++++++++----------------------------- - 1 file changed, 29 insertions(+), 30 deletions(-) - -diff --git a/cts/CTStests.py b/cts/CTStests.py -index ddd8c4a..e4207aa 100644 ---- a/cts/CTStests.py -+++ b/cts/CTStests.py -@@ -1693,6 +1693,19 @@ class Reattach(CTSTest): - self.stopall = SimulStopLite(cm) - self.is_unsafe = 0 # Handled by canrunnow() - -+ def _is_managed(self, node): -+ is_managed = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -Q -G -d true", 1) -+ is_managed = is_managed[:-1] # Strip off the newline -+ return is_managed == "true" -+ -+ def _set_unmanaged(self, node): -+ self.debug("Disable resource management") -+ self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false") -+ -+ def _set_managed(self, node): -+ self.debug("Re-enable resource management") -+ self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D") -+ - def setup(self, node): - attempt = 0 - if not self.startall(None): -@@ -1717,17 +1730,11 @@ class Reattach(CTSTest): - start = StartTest(self.CM) - start(node) - -- is_managed = self.rsh(node, "crm_attribute -Q -G -t crm_config -n is-managed-default -d true", 1) -- is_managed = is_managed[:-1] # Strip off the newline -- if is_managed != "true": -- self.logger.log("Attempting to re-enable resource management on %s (%s)" % (node, is_managed)) -- managed = self.create_watch(["is-managed-default"], 60) -- managed.setwatch() -- -- self.rsh(node, "crm_attribute -V -D -n is-managed-default") -- -- if not managed.lookforall(): -- self.logger.log("Patterns not found: " + repr(managed.unmatched)) -+ if not self._is_managed(node): -+ self.logger.log("Attempting to re-enable resource management on %s" % node) -+ self._set_managed(node) -+ self.CM.cluster_stable() -+ if not self._is_managed(node): - self.logger.log("Could not re-enable resource management") - return 0 - -@@ -1744,11 +1751,12 @@ class Reattach(CTSTest): - self.incr("calls") - - pats = [] -- managed = self.create_watch(["is-managed-default"], 60) -+ # Conveniently, pengine will display this message when disabling management, -+ # even if fencing is not enabled, so we can rely on it. 
-+ managed = self.create_watch(["Delaying fencing operations"], 60) - managed.setwatch() - -- self.debug("Disable resource management") -- self.rsh(node, "crm_attribute -V -n is-managed-default -v false") -+ self._set_unmanaged(node) - - if not managed.lookforall(): - self.logger.log("Patterns not found: " + repr(managed.unmatched)) -@@ -1767,37 +1775,28 @@ class Reattach(CTSTest): - self.debug("Shutting down the cluster") - ret = self.stopall(None) - if not ret: -- self.debug("Re-enable resource management") -- self.rsh(node, "crm_attribute -V -D -n is-managed-default") -+ self._set_managed(node) - return self.failure("Couldn't shut down the cluster") - - self.debug("Bringing the cluster back up") - ret = self.startall(None) - time.sleep(5) # allow ping to update the CIB - if not ret: -- self.debug("Re-enable resource management") -- self.rsh(node, "crm_attribute -V -D -n is-managed-default") -+ self._set_managed(node) - return self.failure("Couldn't restart the cluster") - - if self.local_badnews("ResourceActivity:", watch): -- self.debug("Re-enable resource management") -- self.rsh(node, "crm_attribute -V -D -n is-managed-default") -+ self._set_managed(node) - return self.failure("Resources stopped or started during cluster restart") - - watch = self.create_watch(pats, 60, "StartupActivity") - watch.setwatch() - -- managed = self.create_watch(["is-managed-default"], 60) -- managed.setwatch() -- -- self.debug("Re-enable resource management") -- self.rsh(node, "crm_attribute -V -D -n is-managed-default") -- -- if not managed.lookforall(): -- self.logger.log("Patterns not found: " + repr(managed.unmatched)) -- return self.failure("Resource management not enabled") -- -+ # Re-enable resource management (and verify it happened). -+ self._set_managed(node) - self.CM.cluster_stable() -+ if not self._is_managed(node): -+ return self.failure("Could not re-enable resource management") - - # Ignore actions for STONITH resources - ignore = [] --- -1.8.3.1 - diff --git a/SOURCES/005-avoid-null-dereference.patch b/SOURCES/005-avoid-null-dereference.patch new file mode 100644 index 0000000..ba2c54e --- /dev/null +++ b/SOURCES/005-avoid-null-dereference.patch @@ -0,0 +1,26 @@ +From 8eb3c07119fec6da0712b3940706a14ad3a66483 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Tue, 26 Jul 2016 14:16:32 -0500 +Subject: [PATCH] Fix: pengine: avoid null dereference in new same-node + ordering option + +--- + pengine/graph.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pengine/graph.c b/pengine/graph.c +index a713c71..9b55a4e 100644 +--- a/pengine/graph.c ++++ b/pengine/graph.c +@@ -510,7 +510,7 @@ update_action(action_t * then) + } + + /* Disable constraint if it only applies when on same node, but isn't */ +- if (is_set(other->type, pe_order_same_node) ++ if (is_set(other->type, pe_order_same_node) && first_node && then_node + && (first_node->details != then_node->details)) { + + crm_trace("Disabled constraint %s on %s -> %s on %s", +-- +1.8.3.1 + diff --git a/SOURCES/006-alert-snmp-quoting.patch b/SOURCES/006-alert-snmp-quoting.patch new file mode 100644 index 0000000..d3d6ee5 --- /dev/null +++ b/SOURCES/006-alert-snmp-quoting.patch @@ -0,0 +1,26 @@ +From 443816c624635ffbddfad4ccd26e216fc080a1e9 Mon Sep 17 00:00:00 2001 +From: Klaus Wenninger +Date: Fri, 24 Jun 2016 11:39:15 +0200 +Subject: [PATCH] fix: extra: correct quoting of timestamp-format in + alert-snmp-example + +--- + extra/alerts/alert_snmp.sh.sample | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git 
a/extra/alerts/alert_snmp.sh.sample b/extra/alerts/alert_snmp.sh.sample +index 3e7ee6c..fcdcd7e 100644 +--- a/extra/alerts/alert_snmp.sh.sample ++++ b/extra/alerts/alert_snmp.sh.sample +@@ -34,7 +34,7 @@ + # + # + # +-# ++# + # + # + # +-- +1.8.3.1 + diff --git a/SOURCES/007-cib-callback-unregistration.patch b/SOURCES/007-cib-callback-unregistration.patch new file mode 100644 index 0000000..d034411 --- /dev/null +++ b/SOURCES/007-cib-callback-unregistration.patch @@ -0,0 +1,75 @@ +From 9b4ee98cbfdadcd9f9bc479de0c14a233b46bda7 Mon Sep 17 00:00:00 2001 +From: HideoYamauchi +Date: Tue, 26 Jul 2016 13:31:42 +0900 +Subject: [PATCH] High: lib: Correction of the deletion of the notice + registration. + +--- + lib/cib/cib_client.c | 30 ++++++++++++++++++++++++++++-- + 1 file changed, 28 insertions(+), 2 deletions(-) + +diff --git a/lib/cib/cib_client.c b/lib/cib/cib_client.c +index f7a19b8..b0d156c 100644 +--- a/lib/cib/cib_client.c ++++ b/lib/cib/cib_client.c +@@ -477,6 +477,23 @@ cib_client_add_notify_callback(cib_t * cib, const char *event, + return pcmk_ok; + } + ++static int ++get_notify_list_event_count(cib_t * cib, const char *event) ++{ ++ GList *l = NULL; ++ int count = 0; ++ ++ for (l = g_list_first(cib->notify_list); l; l = g_list_next(l)) { ++ cib_notify_client_t *client = (cib_notify_client_t *)l->data; ++ ++ if (strcmp(client->event, event) == 0) { ++ count++; ++ } ++ } ++ crm_trace("event(%s) count : %d", event, count); ++ return count; ++} ++ + int + cib_client_del_notify_callback(cib_t * cib, const char *event, + void (*callback) (const char *event, xmlNode * msg)) +@@ -488,6 +505,11 @@ cib_client_del_notify_callback(cib_t * cib, const char *event, + return -EPROTONOSUPPORT; + } + ++ if (get_notify_list_event_count(cib, event) == 0) { ++ crm_debug("The callback of the event does not exist(%s)", event); ++ return pcmk_ok; ++ } ++ + crm_debug("Removing callback for %s events", event); + + new_client = calloc(1, sizeof(cib_notify_client_t)); +@@ -496,8 +518,6 @@ cib_client_del_notify_callback(cib_t * cib, const char *event, + + list_item = g_list_find_custom(cib->notify_list, new_client, ciblib_GCompareFunc); + +- cib->cmds->register_notification(cib, event, 0); +- + if (list_item != NULL) { + cib_notify_client_t *list_client = list_item->data; + +@@ -509,6 +529,12 @@ cib_client_del_notify_callback(cib_t * cib, const char *event, + } else { + crm_trace("Callback not present"); + } ++ ++ if (get_notify_list_event_count(cib, event) == 0) { ++ /* When there is not the registration of the event, the processing turns off a notice. */ ++ cib->cmds->register_notification(cib, event, 0); ++ } ++ + free(new_client); + return pcmk_ok; + } +-- +1.8.3.1 + diff --git a/SOURCES/008-crm_mon-headings.patch b/SOURCES/008-crm_mon-headings.patch new file mode 100644 index 0000000..ab9c61c --- /dev/null +++ b/SOURCES/008-crm_mon-headings.patch @@ -0,0 +1,330 @@ +From 11cbc8c2839f5579f682f13c48a18da750c4c079 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Wed, 3 Aug 2016 15:36:08 -0500 +Subject: [PATCH] Low: tools: make crm_mon resources section more consistent + +Always prints a header in HTML and console output, and +indicates if there are no resources of the requested type to show. 
+--- + tools/crm_mon.c | 284 ++++++++++++++++++++++++++++++++++---------------------- + 1 file changed, 174 insertions(+), 110 deletions(-) + +diff --git a/tools/crm_mon.c b/tools/crm_mon.c +index a141852..4352641 100644 +--- a/tools/crm_mon.c ++++ b/tools/crm_mon.c +@@ -1042,6 +1042,174 @@ print_node_end(FILE *stream) + + /*! + * \internal ++ * \brief Print resources section heading appropriate to options ++ * ++ * \param[in] stream File stream to display output to ++ */ ++static void ++print_resources_heading(FILE *stream) ++{ ++ const char *heading; ++ ++ if (group_by_node) { ++ ++ /* Active resources have already been printed by node */ ++ heading = (inactive_resources? "Inactive resources" : NULL); ++ ++ } else if (inactive_resources) { ++ heading = "Full list of resources"; ++ ++ } else { ++ heading = "Active resources"; ++ } ++ ++ /* Print section heading */ ++ switch (output_format) { ++ case mon_output_plain: ++ case mon_output_console: ++ print_as("\n%s:\n\n", heading); ++ break; ++ ++ case mon_output_html: ++ case mon_output_cgi: ++ fprintf(stream, "
<hr />\n <h2>%s</h2>
\n", heading); ++ break; ++ ++ case mon_output_xml: ++ fprintf(stream, " \n"); ++ break; ++ ++ default: ++ break; ++ } ++ ++} ++ ++/*! ++ * \internal ++ * \brief Print whatever resource section closing is appropriate ++ * ++ * \param[in] stream File stream to display output to ++ */ ++static void ++print_resources_closing(FILE *stream, gboolean printed_heading) ++{ ++ const char *heading; ++ ++ /* What type of resources we did or did not display */ ++ if (group_by_node) { ++ heading = "inactive "; ++ } else if (inactive_resources) { ++ heading = ""; ++ } else { ++ heading = "active "; ++ } ++ ++ switch (output_format) { ++ case mon_output_plain: ++ case mon_output_console: ++ if (!printed_heading) { ++ print_as("\nNo %sresources\n\n", heading); ++ } ++ break; ++ ++ case mon_output_html: ++ case mon_output_cgi: ++ if (!printed_heading) { ++ fprintf(stream, "
<hr />\n <h2>No %sresources</h2>
\n", heading); ++ } ++ break; ++ ++ case mon_output_xml: ++ fprintf(stream, " %s\n", ++ (printed_heading? "
" : "")); ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++/*! ++ * \internal ++ * \brief Print whatever resource section(s) are appropriate ++ * ++ * \param[in] stream File stream to display output to ++ * \param[in] data_set Cluster state to display ++ * \param[in] print_opts Bitmask of pe_print_options ++ */ ++static void ++print_resources(FILE *stream, pe_working_set_t *data_set, int print_opts) ++{ ++ GListPtr rsc_iter; ++ const char *prefix = NULL; ++ gboolean printed_heading = FALSE; ++ gboolean brief_output = print_brief; ++ ++ /* If we already showed active resources by node, and ++ * we're not showing inactive resources, we have nothing to do ++ */ ++ if (group_by_node && !inactive_resources) { ++ return; ++ } ++ ++ /* XML uses an indent, and ignores brief option for resources */ ++ if (output_format == mon_output_xml) { ++ prefix = " "; ++ brief_output = FALSE; ++ } ++ ++ /* If we haven't already printed resources grouped by node, ++ * and brief output was requested, print resource summary */ ++ if (brief_output && !group_by_node) { ++ print_resources_heading(stream); ++ printed_heading = TRUE; ++ print_rscs_brief(data_set->resources, NULL, print_opts, stream, ++ inactive_resources); ++ } ++ ++ /* For each resource, display it if appropriate */ ++ for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) { ++ resource_t *rsc = (resource_t *) rsc_iter->data; ++ ++ /* Complex resources may have some sub-resources active and some inactive */ ++ gboolean is_active = rsc->fns->active(rsc, TRUE); ++ gboolean partially_active = rsc->fns->active(rsc, FALSE); ++ ++ /* Skip inactive orphans (deleted but still in CIB) */ ++ if (is_set(rsc->flags, pe_rsc_orphan) && !is_active) { ++ continue; ++ ++ /* Skip active resources if we already displayed them by node */ ++ } else if (group_by_node) { ++ if (is_active) { ++ continue; ++ } ++ ++ /* Skip primitives already counted in a brief summary */ ++ } else if (brief_output && (rsc->variant == pe_native)) { ++ continue; ++ ++ /* Skip resources that aren't at least partially active, ++ * unless we're displaying inactive resources ++ */ ++ } else if (!partially_active && !inactive_resources) { ++ continue; ++ } ++ ++ /* Print this resource */ ++ if (printed_heading == FALSE) { ++ print_resources_heading(stream); ++ printed_heading = TRUE; ++ } ++ rsc->fns->print(rsc, prefix, print_opts, stream); ++ } ++ ++ print_resources_closing(stream, printed_heading); ++} ++ ++/*! 
++ * \internal + * \brief Print heading for resource history + * + * \param[in] stream File stream to display output to +@@ -2852,58 +3020,8 @@ print_status(pe_working_set_t * data_set) + free(online_guest_nodes); + } + +- /* If we haven't already displayed resources grouped by node, +- * or we need to print inactive resources, print a resources section */ +- if (group_by_node == FALSE || inactive_resources) { +- +- /* If we're printing inactive resources, display a heading */ +- if (inactive_resources) { +- if (group_by_node == FALSE) { +- print_as("\nFull list of resources:\n"); +- } else { +- print_as("\nInactive resources:\n"); +- } +- } +- print_as("\n"); +- +- /* If we haven't already printed resources grouped by node, +- * and brief output was requested, print resource summary */ +- if (print_brief && group_by_node == FALSE) { +- print_rscs_brief(data_set->resources, NULL, print_opts, stdout, +- inactive_resources); +- } +- +- /* For each resource, display it if appropriate */ +- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { +- resource_t *rsc = (resource_t *) gIter->data; +- +- /* Complex resources may have some sub-resources active and some inactive */ +- gboolean is_active = rsc->fns->active(rsc, TRUE); +- gboolean partially_active = rsc->fns->active(rsc, FALSE); +- +- /* Always ignore inactive orphan resources (deleted but not yet gone from CIB) */ +- if (is_set(rsc->flags, pe_rsc_orphan) && (is_active == FALSE)) { +- continue; +- } +- +- /* If we already printed resources grouped by node, +- * only print inactive resources, if that was requested */ +- if (group_by_node == TRUE) { +- if ((is_active == FALSE) && inactive_resources) { +- rsc->fns->print(rsc, NULL, print_opts, stdout); +- } +- continue; +- } +- +- /* Otherwise, print resource if it's at least partially active +- * or we're displaying inactive resources, +- * except for primitive resources already counted in a brief summary */ +- if (!(print_brief && (rsc->variant == pe_native)) +- && (partially_active || inactive_resources)) { +- rsc->fns->print(rsc, NULL, print_opts, stdout); +- } +- } +- } ++ /* Print resources section, if needed */ ++ print_resources(stdout, data_set, print_opts); + + /* print Node Attributes section if requested */ + if (show & mon_show_attributes) { +@@ -3009,28 +3127,8 @@ print_xml_status(pe_working_set_t * data_set) + } + fprintf(stream, " \n"); + +- /*** RESOURCES ***/ +- if (group_by_node == FALSE || inactive_resources) { +- fprintf(stream, " \n"); +- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { +- resource_t *rsc = (resource_t *) gIter->data; +- gboolean is_active = rsc->fns->active(rsc, TRUE); +- gboolean partially_active = rsc->fns->active(rsc, FALSE); +- +- if (is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) { +- continue; +- +- } else if (group_by_node == FALSE) { +- if (partially_active || inactive_resources) { +- rsc->fns->print(rsc, " ", print_opts, stream); +- } +- +- } else if (is_active == FALSE && inactive_resources) { +- rsc->fns->print(rsc, " ", print_opts, stream); +- } +- } +- fprintf(stream, " \n"); +- } ++ /* Print resources section, if needed */ ++ print_resources(stream, data_set, print_opts); + + /* print Node Attributes section if requested */ + if (show & mon_show_attributes) { +@@ -3153,42 +3251,8 @@ print_html_status(pe_working_set_t * data_set, const char *filename) + } + fprintf(stream, "\n"); + +- if (group_by_node && inactive_resources) { +- fprintf(stream, "
<hr />\n <h2>Inactive Resources</h2>
\n"); +- +- } else if (group_by_node == FALSE) { +- fprintf(stream, "
\n

Resource List

\n"); +- } +- +- if (group_by_node == FALSE || inactive_resources) { +- if (print_brief && group_by_node == FALSE) { +- print_rscs_brief(data_set->resources, NULL, print_opts, stream, +- inactive_resources); +- } +- +- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) { +- resource_t *rsc = (resource_t *) gIter->data; +- gboolean is_active = rsc->fns->active(rsc, TRUE); +- gboolean partially_active = rsc->fns->active(rsc, FALSE); +- +- if (print_brief && group_by_node == FALSE +- && rsc->variant == pe_native) { +- continue; +- } +- +- if (is_set(rsc->flags, pe_rsc_orphan) && is_active == FALSE) { +- continue; +- +- } else if (group_by_node == FALSE) { +- if (partially_active || inactive_resources) { +- rsc->fns->print(rsc, NULL, print_opts, stream); +- } +- +- } else if (is_active == FALSE && inactive_resources) { +- rsc->fns->print(rsc, NULL, print_opts, stream); +- } +- } +- } ++ /* Print resources section, if needed */ ++ print_resources(stream, data_set, print_opts); + + /* print Node Attributes section if requested */ + if (show & mon_show_attributes) { +-- +1.8.3.1 + diff --git a/SOURCES/009-crm_mon-schema.patch b/SOURCES/009-crm_mon-schema.patch new file mode 100644 index 0000000..668d6a7 --- /dev/null +++ b/SOURCES/009-crm_mon-schema.patch @@ -0,0 +1,34 @@ +From e8d324a69071760b0f4fe47689c978bf08a5b669 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 5 Aug 2016 09:41:23 -0500 +Subject: [PATCH] Low: tools: make crm_mon XML schema handle resources with + multiple active + +--- + xml/crm_mon.rng | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/xml/crm_mon.rng b/xml/crm_mon.rng +index 008c3ef..653c15e 100644 +--- a/xml/crm_mon.rng ++++ b/xml/crm_mon.rng +@@ -264,14 +264,14 @@ + + + +- ++ + + + + + + +- ++ + + + +-- +1.8.3.1 + diff --git a/SOURCES/010-memory-checks.patch b/SOURCES/010-memory-checks.patch new file mode 100644 index 0000000..3308c55 --- /dev/null +++ b/SOURCES/010-memory-checks.patch @@ -0,0 +1,66 @@ +From 02e303ea3ac4a1bef70ad1e79ae402e6ad02ebd3 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 5 Aug 2016 10:33:35 -0500 +Subject: [PATCH] Low: libcib,libfencing,libtransition: handle memory + allocation errors without CRM_CHECK() + +makes coverity happy +--- + lib/cib/cib_attrs.c | 5 ++++- + lib/fencing/st_client.c | 6 +++++- + lib/transition/unpack.c | 6 +++++- + 3 files changed, 14 insertions(+), 3 deletions(-) + +diff --git a/lib/cib/cib_attrs.c b/lib/cib/cib_attrs.c +index d929fa2..1e1a369 100644 +--- a/lib/cib/cib_attrs.c ++++ b/lib/cib/cib_attrs.c +@@ -93,7 +93,10 @@ find_nvpair_attr_delegate(cib_t * the_cib, const char *attr, const char *section + } + + xpath_string = calloc(1, xpath_max); +- CRM_CHECK(xpath_string != NULL, return -ENOMEM); ++ if (xpath_string == NULL) { ++ crm_perror(LOG_CRIT, "Could not create xpath"); ++ return -ENOMEM; ++ } + + attr_snprintf(xpath_string, offset, xpath_max, "%.128s", get_object_path(section)); + +diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c +index 6d4dce1..e1eda35 100644 +--- a/lib/fencing/st_client.c ++++ b/lib/fencing/st_client.c +@@ -353,7 +353,11 @@ create_level_registration_xml(const char *node, const char *pattern, + + crm_trace("Adding %s (%dc) at offset %d", device_list->value, adding, len); + list = realloc_safe(list, len + adding + 1); /* +1 EOS */ +- CRM_CHECK(list != NULL, free_xml(data); return NULL); ++ if (list == NULL) { ++ crm_perror(LOG_CRIT, "Could not create device list"); ++ free_xml(data); ++ return NULL; ++ } + sprintf(list + len, 
"%s%s", len?",":"", device_list->value); + len += adding; + } +diff --git a/lib/transition/unpack.c b/lib/transition/unpack.c +index 7a8c656..bdb01d1 100644 +--- a/lib/transition/unpack.c ++++ b/lib/transition/unpack.c +@@ -41,7 +41,11 @@ unpack_action(synapse_t * parent, xmlNode * xml_action) + } + + action = calloc(1, sizeof(crm_action_t)); +- CRM_CHECK(action != NULL, return NULL); ++ if (action == NULL) { ++ crm_perror(LOG_CRIT, "Cannot unpack action"); ++ crm_log_xml_trace(xml_action, "Lost action"); ++ return NULL; ++ } + + action->id = crm_parse_int(value, NULL); + action->type = action_type_rsc; +-- +1.8.3.1 + diff --git a/SOURCES/0100-Refactor-lrmd-handle-shutdown-a-little-more-cleanly.patch b/SOURCES/0100-Refactor-lrmd-handle-shutdown-a-little-more-cleanly.patch deleted file mode 100644 index faf4146..0000000 --- a/SOURCES/0100-Refactor-lrmd-handle-shutdown-a-little-more-cleanly.patch +++ /dev/null @@ -1,71 +0,0 @@ -From f289115b5a3693934bb3140725e2dc9aef3a6a13 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 22 Dec 2015 12:24:14 -0600 -Subject: [PATCH] Refactor: lrmd: handle shutdown a little more cleanly - ---- - lrmd/main.c | 33 +++++++++++++++++---------------- - 1 file changed, 17 insertions(+), 16 deletions(-) - -diff --git a/lrmd/main.c b/lrmd/main.c -index a3b7929..73519e2 100644 ---- a/lrmd/main.c -+++ b/lrmd/main.c -@@ -231,9 +231,23 @@ void - lrmd_shutdown(int nsig) - { - crm_info("Terminating with %d clients", crm_hash_table_size(client_connections)); -+ -+ if (stonith_api) { -+ stonith_api->cmds->remove_notification(stonith_api, T_STONITH_NOTIFY_DISCONNECT); -+ stonith_api->cmds->disconnect(stonith_api); -+ stonith_api_delete(stonith_api); -+ } - if (ipcs) { - mainloop_del_ipc_server(ipcs); - } -+ -+#ifdef ENABLE_PCMK_REMOTE -+ lrmd_tls_server_destroy(); -+ ipc_proxy_cleanup(); -+#endif -+ -+ crm_client_cleanup(); -+ g_hash_table_destroy(rsc_list); - crm_exit(pcmk_ok); - } - -@@ -255,7 +269,6 @@ static struct crm_option long_options[] = { - int - main(int argc, char **argv) - { -- int rc = 0; - int flag = 0; - int index = 0; - const char *option = NULL; -@@ -349,19 +362,7 @@ main(int argc, char **argv) - crm_info("Starting"); - g_main_run(mainloop); - -- mainloop_del_ipc_server(ipcs); --#ifdef ENABLE_PCMK_REMOTE -- lrmd_tls_server_destroy(); -- ipc_proxy_cleanup(); --#endif -- crm_client_cleanup(); -- -- g_hash_table_destroy(rsc_list); -- -- if (stonith_api) { -- stonith_api->cmds->disconnect(stonith_api); -- stonith_api_delete(stonith_api); -- } -- -- return rc; -+ /* should never get here */ -+ lrmd_shutdown(SIGTERM); -+ return pcmk_ok; - } --- -1.8.3.1 - diff --git a/SOURCES/0101-Refactor-lrmd-make-proxied-IPC-providers-clients-opa.patch b/SOURCES/0101-Refactor-lrmd-make-proxied-IPC-providers-clients-opa.patch deleted file mode 100644 index 43219a8..0000000 --- a/SOURCES/0101-Refactor-lrmd-make-proxied-IPC-providers-clients-opa.patch +++ /dev/null @@ -1,111 +0,0 @@ -From 68e7bb19d69a999443524ba79203979b35f54e83 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 22 Dec 2015 11:41:56 -0600 -Subject: [PATCH 101/105] Refactor: lrmd: make proxied IPC providers/clients - opaque - -This removes an unused extern declaration in crmd.h, -makes the ipc_providers and ipc_clients tables static to ipc_proxy.c, -and adds an ipc_proxy_get_provider() function for future use. 
---- - crmd/crmd.h | 1 - - lrmd/ipc_proxy.c | 48 ++++++++++++++++++++++++++++++------------------ - lrmd/lrmd_private.h | 1 + - 3 files changed, 31 insertions(+), 19 deletions(-) - -diff --git a/crmd/crmd.h b/crmd/crmd.h -index 031f414..6039c85 100644 ---- a/crmd/crmd.h -+++ b/crmd/crmd.h -@@ -24,7 +24,6 @@ - # define DAEMON_DEBUG DEVEL_DIR"/"SYS_NAME".debug" - - extern GMainLoop *crmd_mainloop; --extern GHashTable *ipc_clients; - extern bool no_quorum_suicide_escalation; - - extern void crmd_metadata(void); -diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c -index 84fb3ec..d95a396 100644 ---- a/lrmd/ipc_proxy.c -+++ b/lrmd/ipc_proxy.c -@@ -42,34 +42,46 @@ static qb_ipcs_service_t *crmd_ipcs = NULL; - static qb_ipcs_service_t *stonith_ipcs = NULL; - - /* ipc providers == crmd clients connecting from cluster nodes */ --GHashTable *ipc_providers; -+static GHashTable *ipc_providers = NULL; - /* ipc clients == things like cibadmin, crm_resource, connecting locally */ --GHashTable *ipc_clients; -+static GHashTable *ipc_clients = NULL; -+ -+/*! -+ * \internal -+ * \brief Get an IPC proxy provider -+ * -+ * \return Pointer to a provider if one exists, NULL otherwise -+ * -+ * \note Grab the first provider available; any provider will work, and usually -+ * there will be only one. These are client connections originating from a -+ * cluster node's crmd. -+ */ -+crm_client_t * -+ipc_proxy_get_provider() -+{ -+ if (ipc_providers) { -+ GHashTableIter iter; -+ gpointer key = NULL; -+ gpointer value = NULL; -+ -+ g_hash_table_iter_init(&iter, ipc_providers); -+ if (g_hash_table_iter_next(&iter, &key, &value)) { -+ return (crm_client_t*)value; -+ } -+ } -+ return NULL; -+} - - static int32_t - ipc_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid, const char *ipc_channel) - { -- void *key = NULL; -- void *value = NULL; - crm_client_t *client; -- crm_client_t *ipc_proxy = NULL; -- GHashTableIter iter; -+ crm_client_t *ipc_proxy = ipc_proxy_get_provider(); - xmlNode *msg; - - crm_trace("Connection %p on channel %s", c, ipc_channel); - -- if (g_hash_table_size(ipc_providers) == 0) { -- crm_err("No ipc providers available for uid %d gid %d", uid, gid); -- return -EREMOTEIO; -- } -- -- g_hash_table_iter_init(&iter, ipc_providers); -- if (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { -- /* grab the first provider available, any provider in this -- * table will work. Usually there will only be one. These are -- * lrmd client connections originating for a cluster node's crmd. 
*/ -- ipc_proxy = value; -- } else { -+ if (ipc_proxy == NULL) { - crm_err("No ipc providers available for uid %d gid %d", uid, gid); - return -EREMOTEIO; - } -diff --git a/lrmd/lrmd_private.h b/lrmd/lrmd_private.h -index ddb1506..52f79b8 100644 ---- a/lrmd/lrmd_private.h -+++ b/lrmd/lrmd_private.h -@@ -103,6 +103,7 @@ void ipc_proxy_cleanup(void); - void ipc_proxy_add_provider(crm_client_t *client); - void ipc_proxy_remove_provider(crm_client_t *client); - void ipc_proxy_forward_client(crm_client_t *client, xmlNode *xml); -+crm_client_t *ipc_proxy_get_provider(void); - #endif - - #endif --- -1.8.3.1 - diff --git a/SOURCES/0102-Refactor-crmd-lrmd-liblrmd-use-defined-constants-for.patch b/SOURCES/0102-Refactor-crmd-lrmd-liblrmd-use-defined-constants-for.patch deleted file mode 100644 index b22ffd3..0000000 --- a/SOURCES/0102-Refactor-crmd-lrmd-liblrmd-use-defined-constants-for.patch +++ /dev/null @@ -1,181 +0,0 @@ -From 6239d1dd84a50585a30175978be7e6d8ffb0b155 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 22 Dec 2015 15:59:21 -0600 -Subject: [PATCH 102/105] Refactor: crmd,lrmd,liblrmd: use defined constants - for lrmd IPC operations - -Reduces chance of typos. ---- - crmd/lrm_state.c | 6 +++--- - include/crm/lrmd.h | 6 ++++++ - lib/lrmd/proxy_common.c | 6 +++--- - lrmd/ipc_proxy.c | 14 +++++++------- - lrmd/remote_ctl.c | 6 +++--- - 5 files changed, 22 insertions(+), 16 deletions(-) - -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index 0e52ff6..497d3f9 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -481,7 +481,7 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); - - /* This is msg from remote ipc client going to real ipc server */ -- if (safe_str_eq(op, "new")) { -+ if (safe_str_eq(op, LRMD_IPC_OP_NEW)) { - const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); - - CRM_CHECK(channel != NULL, return); -@@ -490,10 +490,10 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - remote_proxy_notify_destroy(lrmd, session); - } - crm_trace("new remote proxy client established to %s, session id %s", channel, session); -- } else if (safe_str_eq(op, "destroy")) { -+ } else if (safe_str_eq(op, LRMD_IPC_OP_DESTROY)) { - remote_proxy_end_session(session); - -- } else if (safe_str_eq(op, "request")) { -+ } else if (safe_str_eq(op, LRMD_IPC_OP_REQUEST)) { - int flags = 0; - xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); - const char *name = crm_element_value(msg, F_LRMD_IPC_CLIENT); -diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h -index 5a3c6ce..5c74798 100644 ---- a/include/crm/lrmd.h -+++ b/include/crm/lrmd.h -@@ -90,6 +90,12 @@ typedef struct lrmd_key_value_s { - #define LRMD_OP_POKE "lrmd_rsc_poke" - #define LRMD_OP_NEW_CLIENT "lrmd_rsc_new_client" - -+#define LRMD_IPC_OP_NEW "new" -+#define LRMD_IPC_OP_DESTROY "destroy" -+#define LRMD_IPC_OP_EVENT "event" -+#define LRMD_IPC_OP_REQUEST "request" -+#define LRMD_IPC_OP_RESPONSE "response" -+ - #define F_LRMD_IPC_OP "lrmd_ipc_op" - #define F_LRMD_IPC_IPC_SERVER "lrmd_ipc_server" - #define F_LRMD_IPC_SESSION "lrmd_ipc_session" -diff --git a/lib/lrmd/proxy_common.c b/lib/lrmd/proxy_common.c -index 50c59c3..a0f5e62 100644 ---- a/lib/lrmd/proxy_common.c -+++ b/lib/lrmd/proxy_common.c -@@ -39,7 +39,7 @@ remote_proxy_notify_destroy(lrmd_t *lrmd, const char *session_id) - { - /* sending to the remote node that an ipc connection has been destroyed */ - xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); -- crm_xml_add(msg, 
F_LRMD_IPC_OP, "destroy"); -+ crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); - crm_xml_add(msg, F_LRMD_IPC_SESSION, session_id); - lrmd_internal_proxy_send(lrmd, msg); - free_xml(msg); -@@ -50,7 +50,7 @@ remote_proxy_relay_event(lrmd_t *lrmd, const char *session_id, xmlNode *msg) - { - /* sending to the remote node an event msg. */ - xmlNode *event = create_xml_node(NULL, T_LRMD_IPC_PROXY); -- crm_xml_add(event, F_LRMD_IPC_OP, "event"); -+ crm_xml_add(event, F_LRMD_IPC_OP, LRMD_IPC_OP_EVENT); - crm_xml_add(event, F_LRMD_IPC_SESSION, session_id); - add_message_xml(event, F_LRMD_IPC_MSG, msg); - crm_log_xml_explicit(event, "EventForProxy"); -@@ -63,7 +63,7 @@ remote_proxy_relay_response(lrmd_t *lrmd, const char *session_id, xmlNode *msg, - { - /* sending to the remote node a response msg. */ - xmlNode *response = create_xml_node(NULL, T_LRMD_IPC_PROXY); -- crm_xml_add(response, F_LRMD_IPC_OP, "response"); -+ crm_xml_add(response, F_LRMD_IPC_OP, LRMD_IPC_OP_RESPONSE); - crm_xml_add(response, F_LRMD_IPC_SESSION, session_id); - crm_xml_add_int(response, F_LRMD_IPC_MSG_ID, msg_id); - add_message_xml(response, F_LRMD_IPC_MSG, msg); -diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c -index d95a396..164a9ff 100644 ---- a/lrmd/ipc_proxy.c -+++ b/lrmd/ipc_proxy.c -@@ -101,7 +101,7 @@ ipc_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid, const char *ipc - g_hash_table_insert(ipc_clients, client->id, client); - - msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); -- crm_xml_add(msg, F_LRMD_IPC_OP, "new"); -+ crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_NEW); - crm_xml_add(msg, F_LRMD_IPC_IPC_SERVER, ipc_channel); - crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); - lrmd_server_send_notify(ipc_proxy, msg); -@@ -157,7 +157,7 @@ ipc_proxy_forward_client(crm_client_t *ipc_proxy, xmlNode *xml) - - if (ipc_client == NULL) { - xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); -- crm_xml_add(msg, F_LRMD_IPC_OP, "destroy"); -+ crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); - crm_xml_add(msg, F_LRMD_IPC_SESSION, session); - lrmd_server_send_notify(ipc_proxy, msg); - free_xml(msg); -@@ -176,11 +176,11 @@ ipc_proxy_forward_client(crm_client_t *ipc_proxy, xmlNode *xml) - * and forwarding it to connection 1. 
- */ - -- if (safe_str_eq(msg_type, "event")) { -+ if (safe_str_eq(msg_type, LRMD_IPC_OP_EVENT)) { - crm_trace("Sending event to %s", ipc_client->id); - rc = crm_ipcs_send(ipc_client, 0, msg, crm_ipc_server_event); - -- } else if (safe_str_eq(msg_type, "response")) { -+ } else if (safe_str_eq(msg_type, LRMD_IPC_OP_RESPONSE)) { - int msg_id = 0; - - crm_element_value_int(xml, F_LRMD_IPC_MSG_ID, &msg_id); -@@ -190,7 +190,7 @@ ipc_proxy_forward_client(crm_client_t *ipc_proxy, xmlNode *xml) - CRM_LOG_ASSERT(msg_id == ipc_client->request_id); - ipc_client->request_id = 0; - -- } else if (safe_str_eq(msg_type, "destroy")) { -+ } else if (safe_str_eq(msg_type, LRMD_IPC_OP_DESTROY)) { - qb_ipcs_disconnect(ipc_client->ipcs); - - } else { -@@ -245,7 +245,7 @@ ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) - client->request_id = id; - - msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); -- crm_xml_add(msg, F_LRMD_IPC_OP, "request"); -+ crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_REQUEST); - crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); - crm_xml_add(msg, F_LRMD_IPC_CLIENT, crm_client_name(client)); - crm_xml_add(msg, F_LRMD_IPC_USER, client->user); -@@ -275,7 +275,7 @@ ipc_proxy_closed(qb_ipcs_connection_t * c) - - if (ipc_proxy) { - xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); -- crm_xml_add(msg, F_LRMD_IPC_OP, "destroy"); -+ crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); - crm_xml_add(msg, F_LRMD_IPC_SESSION, client->id); - lrmd_server_send_notify(ipc_proxy, msg); - free_xml(msg); -diff --git a/lrmd/remote_ctl.c b/lrmd/remote_ctl.c -index ad85954..1983c88 100644 ---- a/lrmd/remote_ctl.c -+++ b/lrmd/remote_ctl.c -@@ -333,7 +333,7 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); - - /* This is msg from remote ipc client going to real ipc server */ -- if (safe_str_eq(op, "new")) { -+ if (safe_str_eq(op, LRMD_IPC_OP_NEW)) { - const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); - - CRM_CHECK(channel != NULL, return); -@@ -342,10 +342,10 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - remote_proxy_notify_destroy(lrmd, session); - } - crm_info("new remote proxy client established to %s, session id %s", channel, session); -- } else if (safe_str_eq(op, "destroy")) { -+ } else if (safe_str_eq(op, LRMD_IPC_OP_DESTROY)) { - remote_proxy_end_session(session); - -- } else if (safe_str_eq(op, "request")) { -+ } else if (safe_str_eq(op, LRMD_IPC_OP_REQUEST)) { - int flags = 0; - xmlNode *request = get_message_xml(msg, F_LRMD_IPC_MSG); - const char *name = crm_element_value(msg, F_LRMD_IPC_CLIENT); --- -1.8.3.1 - diff --git a/SOURCES/0103-Test-cts-simulate-pacemaker_remote-failure-with-kill.patch b/SOURCES/0103-Test-cts-simulate-pacemaker_remote-failure-with-kill.patch deleted file mode 100644 index 3a1ecd0..0000000 --- a/SOURCES/0103-Test-cts-simulate-pacemaker_remote-failure-with-kill.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 48246b5916745a56cb0ceb7b4e148b9e587708fe Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 23 Dec 2015 14:36:51 -0600 -Subject: [PATCH 103/105] Test: cts: simulate pacemaker_remote failure with - kill - -Previously, failure was simulated by stopping pacemaker_remote, but -that will eventually cause a graceful stop rather than a failure, -so first kill the process. 
- -rebased commit to 1.1.14-rc5 ---- - cts/CTStests.py | 14 +++++++++++--- - 1 file changed, 11 insertions(+), 3 deletions(-) - -diff --git a/cts/CTStests.py b/cts/CTStests.py -index e6f3abe..fb1c5f2 100644 ---- a/cts/CTStests.py -+++ b/cts/CTStests.py -@@ -2764,6 +2764,14 @@ class RemoteDriver(CTSTest): - self.pcmk_started = 1 - break - -+ def kill_pcmk_remote(self, node): -+ """ Simulate a Pacemaker Remote daemon failure. """ -+ -+ # We kill the process to prevent a graceful stop, -+ # then stop it to prevent the OS from restarting it. -+ self.rsh(node, "killall -9 pacemaker_remoted") -+ self.stop_pcmk_remote(node) -+ - def start_metal(self, node): - pcmk_started = 0 - -@@ -2855,7 +2863,7 @@ class RemoteDriver(CTSTest): - - # force stop the pcmk remote daemon. this will result in fencing - self.debug("Force stopped active remote node") -- self.stop_pcmk_remote(node) -+ self.kill_pcmk_remote(node) - - self.debug("Waiting for remote node to be fenced.") - self.set_timer("remoteMetalFence") --- -1.8.3.1 - diff --git a/SOURCES/0104-Feature-lrmd-liblrmd-add-lrmd-IPC-operations-for-req.patch b/SOURCES/0104-Feature-lrmd-liblrmd-add-lrmd-IPC-operations-for-req.patch deleted file mode 100644 index f956820..0000000 --- a/SOURCES/0104-Feature-lrmd-liblrmd-add-lrmd-IPC-operations-for-req.patch +++ /dev/null @@ -1,119 +0,0 @@ -From 29cc1018cb98b1ff864f2aed090cb6b591963275 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 23 Dec 2015 15:01:48 -0600 -Subject: [PATCH 104/105] Feature: lrmd,liblrmd: add lrmd IPC operations for - requesting and acknowledging shutdown - -This adds two new lrmd IPC operations, LRMD_IPC_OP_SHUTDOWN_REQ and -LRMD_IPC_OP_SHUTDOWN_ACK, along with functions to send them. -This will support the ability to stop pacemaker_remote gracefully. - -At this point, no code uses these new operations. ---- - include/crm/lrmd.h | 2 ++ - include/crm_internal.h | 1 + - lib/lrmd/proxy_common.c | 14 ++++++++++++++ - lrmd/ipc_proxy.c | 24 ++++++++++++++++++++++++ - lrmd/lrmd_private.h | 1 + - 5 files changed, 42 insertions(+) - -diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h -index 5c74798..6660fb9 100644 ---- a/include/crm/lrmd.h -+++ b/include/crm/lrmd.h -@@ -95,6 +95,8 @@ typedef struct lrmd_key_value_s { - #define LRMD_IPC_OP_EVENT "event" - #define LRMD_IPC_OP_REQUEST "request" - #define LRMD_IPC_OP_RESPONSE "response" -+#define LRMD_IPC_OP_SHUTDOWN_REQ "shutdown_req" -+#define LRMD_IPC_OP_SHUTDOWN_ACK "shutdown_ack" - - #define F_LRMD_IPC_OP "lrmd_ipc_op" - #define F_LRMD_IPC_IPC_SERVER "lrmd_ipc_server" -diff --git a/include/crm_internal.h b/include/crm_internal.h -index e0bbb06..c5fbcb7 100644 ---- a/include/crm_internal.h -+++ b/include/crm_internal.h -@@ -380,6 +380,7 @@ typedef struct remote_proxy_s { - - } remote_proxy_t; - void remote_proxy_notify_destroy(lrmd_t *lrmd, const char *session_id); -+void remote_proxy_ack_shutdown(lrmd_t *lrmd); - void remote_proxy_relay_event(lrmd_t *lrmd, const char *session_id, xmlNode *msg); - void remote_proxy_relay_response(lrmd_t *lrmd, const char *session_id, xmlNode *msg, int msg_id); - void remote_proxy_end_session(const char *session); -diff --git a/lib/lrmd/proxy_common.c b/lib/lrmd/proxy_common.c -index a0f5e62..eb17e4e 100644 ---- a/lib/lrmd/proxy_common.c -+++ b/lib/lrmd/proxy_common.c -@@ -45,6 +45,20 @@ remote_proxy_notify_destroy(lrmd_t *lrmd, const char *session_id) - free_xml(msg); - } - -+/*! -+ * \brief Send an acknowledgment of a remote proxy shutdown request. 
-+ * -+ * \param[in] lrmd Connection to proxy -+ */ -+void -+remote_proxy_ack_shutdown(lrmd_t *lrmd) -+{ -+ xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); -+ crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_ACK); -+ lrmd_internal_proxy_send(lrmd, msg); -+ free_xml(msg); -+} -+ - void - remote_proxy_relay_event(lrmd_t *lrmd, const char *session_id, xmlNode *msg) - { -diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c -index 164a9ff..9633a67 100644 ---- a/lrmd/ipc_proxy.c -+++ b/lrmd/ipc_proxy.c -@@ -259,6 +259,30 @@ ipc_proxy_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) - return 0; - } - -+/*! -+ * \internal -+ * \brief Notify a proxy provider that we wish to shut down -+ * -+ * \return 0 on success, -1 on error -+ */ -+int -+ipc_proxy_shutdown_req(crm_client_t *ipc_proxy) -+{ -+ xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); -+ int rc; -+ -+ crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_SHUTDOWN_REQ); -+ -+ /* We don't really have a session, but crmd needs this attribute -+ * to recognize this as proxy communication. -+ */ -+ crm_xml_add(msg, F_LRMD_IPC_SESSION, "0"); -+ -+ rc = (lrmd_server_send_notify(ipc_proxy, msg) < 0)? -1 : 0; -+ free_xml(msg); -+ return rc; -+} -+ - static int32_t - ipc_proxy_closed(qb_ipcs_connection_t * c) - { -diff --git a/lrmd/lrmd_private.h b/lrmd/lrmd_private.h -index 52f79b8..78f14c9 100644 ---- a/lrmd/lrmd_private.h -+++ b/lrmd/lrmd_private.h -@@ -104,6 +104,7 @@ void ipc_proxy_add_provider(crm_client_t *client); - void ipc_proxy_remove_provider(crm_client_t *client); - void ipc_proxy_forward_client(crm_client_t *client, xmlNode *xml); - crm_client_t *ipc_proxy_get_provider(void); -+int ipc_proxy_shutdown_req(crm_client_t *ipc_proxy); - #endif - - #endif --- -1.8.3.1 - diff --git a/SOURCES/0105-Feature-crmd-support-graceful-pacemaker_remote-stops.patch b/SOURCES/0105-Feature-crmd-support-graceful-pacemaker_remote-stops.patch deleted file mode 100644 index 523c089..0000000 --- a/SOURCES/0105-Feature-crmd-support-graceful-pacemaker_remote-stops.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 8eebc8a30a55645164d3c41acaf028dd75fab275 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 23 Dec 2015 15:18:38 -0600 -Subject: [PATCH 105/105] Feature: crmd: support graceful pacemaker_remote - stops - -NOT YET IMPLEMENTED. This just is a placeholder. 
---- - crmd/lrm_state.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index 497d3f9..5ee5b83 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -478,6 +478,13 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - CRM_CHECK(op != NULL, return); - CRM_CHECK(session != NULL, return); - -+ if (safe_str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ)) { -+ crm_warn("Graceful proxy shutdown not yet supported"); -+ /* TODO: uncomment this, then put node in standby: */ -+ /* remote_proxy_ack_shutdown(lrmd); */ -+ return; -+ } -+ - crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); - - /* This is msg from remote ipc client going to real ipc server */ --- -1.8.3.1 - diff --git a/SOURCES/0106-Feature-pacemaker_remote-support-graceful-stops.patch b/SOURCES/0106-Feature-pacemaker_remote-support-graceful-stops.patch deleted file mode 100644 index 5fd1506..0000000 --- a/SOURCES/0106-Feature-pacemaker_remote-support-graceful-stops.patch +++ /dev/null @@ -1,263 +0,0 @@ -From c83dc10b975aa70a3da85dc2e63cec99a0b729b2 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Wed, 23 Dec 2015 15:19:28 -0600 -Subject: [PATCH] Feature: pacemaker_remote: support graceful stops - -When pacemaker_remote gets an interrupt signal, if there are any connected -proxy providers, it will send an lrmd IPC op for a shutdown request, -and stop accepting new provider connections. If the provider acknowledges the -request, pacemaker_remote will wait until all providers disconnect before -exiting itself. This gives the cluster the opportunity to stop any resources -running on the node that is shutting down. - -If the provider is an older version that does not support graceful stops, -pacemaker_remote will time out waiting for the ack, then exit immediately. - -Since we are now waiting for resources to exit, the systemd stop timeout -for pacemaker_remote has been raised to match pacemaker's. ---- - lrmd/ipc_proxy.c | 12 +++- - lrmd/lrmd_private.h | 4 +- - lrmd/main.c | 121 +++++++++++++++++++++++++++++++++++++-- - lrmd/pacemaker_remote.service.in | 4 +- - lrmd/tls_backend.c | 3 +- - 5 files changed, 135 insertions(+), 9 deletions(-) - -diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c -index 9633a67..07c13ab 100644 ---- a/lrmd/ipc_proxy.c -+++ b/lrmd/ipc_proxy.c -@@ -152,9 +152,19 @@ ipc_proxy_forward_client(crm_client_t *ipc_proxy, xmlNode *xml) - const char *session = crm_element_value(xml, F_LRMD_IPC_SESSION); - const char *msg_type = crm_element_value(xml, F_LRMD_IPC_OP); - xmlNode *msg = get_message_xml(xml, F_LRMD_IPC_MSG); -- crm_client_t *ipc_client = crm_client_get_by_id(session); -+ crm_client_t *ipc_client; - int rc = 0; - -+ /* If the IPC provider is acknowledging our shutdown request, -+ * defuse the short exit timer to give the cluster time to -+ * stop any resources we're running. 
-+ */ -+ if (safe_str_eq(msg_type, LRMD_IPC_OP_SHUTDOWN_ACK)) { -+ handle_shutdown_ack(); -+ return; -+ } -+ -+ ipc_client = crm_client_get_by_id(session); - if (ipc_client == NULL) { - xmlNode *msg = create_xml_node(NULL, T_LRMD_IPC_PROXY); - crm_xml_add(msg, F_LRMD_IPC_OP, LRMD_IPC_OP_DESTROY); -diff --git a/lrmd/lrmd_private.h b/lrmd/lrmd_private.h -index 78f14c9..29146f5 100644 ---- a/lrmd/lrmd_private.h -+++ b/lrmd/lrmd_private.h -@@ -80,7 +80,9 @@ void process_lrmd_message(crm_client_t * client, uint32_t id, xmlNode * request) - - void free_rsc(gpointer data); - --void lrmd_shutdown(int nsig); -+void handle_shutdown_ack(void); -+ -+void lrmd_client_destroy(crm_client_t *client); - - void client_disconnect_cleanup(const char *client_id); - -diff --git a/lrmd/main.c b/lrmd/main.c -index 73519e2..98a1412 100644 ---- a/lrmd/main.c -+++ b/lrmd/main.c -@@ -40,6 +40,16 @@ static qb_ipcs_service_t *ipcs = NULL; - stonith_t *stonith_api = NULL; - int lrmd_call_id = 0; - -+#ifdef ENABLE_PCMK_REMOTE -+/* whether shutdown request has been sent */ -+static volatile sig_atomic_t shutting_down = FALSE; -+ -+/* timer for waiting for acknowledgment of shutdown request */ -+static volatile guint shutdown_ack_timer = 0; -+ -+static gboolean lrmd_exit(gpointer data); -+#endif -+ - static void - stonith_connection_destroy_cb(stonith_t * st, stonith_event_t * e) - { -@@ -151,6 +161,27 @@ lrmd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) - return 0; - } - -+/*! -+ * \internal -+ * \brief Free a client connection, and exit if appropriate -+ * -+ * \param[in] client Client connection to free -+ */ -+void -+lrmd_client_destroy(crm_client_t *client) -+{ -+ crm_client_destroy(client); -+ -+#ifdef ENABLE_PCMK_REMOTE -+ /* If we were waiting to shut down, we can now safely do so -+ * if there are no more proxied IPC providers -+ */ -+ if (shutting_down && (ipc_proxy_get_provider() == NULL)) { -+ lrmd_exit(NULL); -+ } -+#endif -+} -+ - static int32_t - lrmd_ipc_closed(qb_ipcs_connection_t * c) - { -@@ -165,7 +196,7 @@ lrmd_ipc_closed(qb_ipcs_connection_t * c) - #ifdef ENABLE_PCMK_REMOTE - ipc_proxy_remove_provider(client); - #endif -- crm_client_destroy(client); -+ lrmd_client_destroy(client); - return 0; - } - -@@ -227,8 +258,17 @@ lrmd_server_send_notify(crm_client_t * client, xmlNode * msg) - return -1; - } - --void --lrmd_shutdown(int nsig) -+/*! -+ * \internal -+ * \brief Clean up and exit immediately -+ * -+ * \param[in] data Ignored -+ * -+ * \return Doesn't return -+ * \note This can be used as a timer callback. -+ */ -+static gboolean -+lrmd_exit(gpointer data) - { - crm_info("Terminating with %d clients", crm_hash_table_size(client_connections)); - -@@ -249,6 +289,79 @@ lrmd_shutdown(int nsig) - crm_client_cleanup(); - g_hash_table_destroy(rsc_list); - crm_exit(pcmk_ok); -+ return FALSE; -+} -+ -+/*! -+ * \internal -+ * \brief Request cluster shutdown if appropriate, otherwise exit immediately -+ * -+ * \param[in] nsig Signal that caused invocation (ignored) -+ */ -+static void -+lrmd_shutdown(int nsig) -+{ -+#ifdef ENABLE_PCMK_REMOTE -+ crm_client_t *ipc_proxy = ipc_proxy_get_provider(); -+ -+ /* If there are active proxied IPC providers, then we may be running -+ * resources, so notify the cluster that we wish to shut down. 
-+ */ -+ if (ipc_proxy) { -+ if (shutting_down) { -+ crm_trace("Shutdown already in progress"); -+ return; -+ } -+ -+ crm_info("Sending shutdown request to cluster"); -+ if (ipc_proxy_shutdown_req(ipc_proxy) < 0) { -+ crm_crit("Shutdown request failed, exiting immediately"); -+ -+ } else { -+ /* We requested a shutdown. Now, we need to wait for an -+ * acknowledgement from the proxy host (which ensures the proxy host -+ * supports shutdown requests), then wait for all proxy hosts to -+ * disconnect (which ensures that all resources have been stopped). -+ */ -+ shutting_down = TRUE; -+ -+ /* Stop accepting new proxy connections */ -+ lrmd_tls_server_destroy(); -+ -+ /* Older crmd versions will never acknowledge our request, so set a -+ * fairly short timeout to exit quickly in that case. If we get the -+ * ack, we'll defuse this timer. -+ */ -+ shutdown_ack_timer = g_timeout_add_seconds(20, lrmd_exit, NULL); -+ -+ /* Currently, we let the OS kill us if the clients don't disconnect -+ * in a reasonable time. We could instead set a long timer here -+ * (shorter than what the OS is likely to use) and exit immediately -+ * if it pops. -+ */ -+ return; -+ } -+ } -+#endif -+ lrmd_exit(NULL); -+} -+ -+/*! -+ * \internal -+ * \brief Defuse short exit timer if shutting down -+ */ -+void handle_shutdown_ack() -+{ -+#ifdef ENABLE_PCMK_REMOTE -+ if (shutting_down) { -+ crm_info("Received shutdown ack"); -+ if (shutdown_ack_timer > 0) { -+ g_source_remove(shutdown_ack_timer); -+ } -+ return; -+ } -+#endif -+ crm_debug("Ignoring unexpected shutdown ack"); - } - - /* *INDENT-OFF* */ -@@ -363,6 +476,6 @@ main(int argc, char **argv) - g_main_run(mainloop); - - /* should never get here */ -- lrmd_shutdown(SIGTERM); -+ lrmd_exit(NULL); - return pcmk_ok; - } -diff --git a/lrmd/pacemaker_remote.service.in b/lrmd/pacemaker_remote.service.in -index 15e61fb..7252976 100644 ---- a/lrmd/pacemaker_remote.service.in -+++ b/lrmd/pacemaker_remote.service.in -@@ -13,7 +13,9 @@ EnvironmentFile=-/etc/sysconfig/pacemaker - - ExecStart=@sbindir@/pacemaker_remoted - --TimeoutStopSec=30s -+# Pacemaker Remote can exit only after all managed services have shut down; -+# an HA database could conceivably take even longer than this -+TimeoutStopSec=30min - TimeoutStartSec=30s - - # Restart options include: no, on-success, on-failure, on-abort or always -diff --git a/lrmd/tls_backend.c b/lrmd/tls_backend.c -index df5387f..7b8ef9d 100644 ---- a/lrmd/tls_backend.c -+++ b/lrmd/tls_backend.c -@@ -163,8 +163,7 @@ lrmd_remote_client_destroy(gpointer user_data) - close(csock); - } - -- crm_client_destroy(client); -- -+ lrmd_client_destroy(client); - return; - } - --- -1.8.3.1 - diff --git a/SOURCES/0107-Feature-PE-Honor-the-shutdown-transient-attributes-f.patch b/SOURCES/0107-Feature-PE-Honor-the-shutdown-transient-attributes-f.patch deleted file mode 100644 index 5a897a9..0000000 --- a/SOURCES/0107-Feature-PE-Honor-the-shutdown-transient-attributes-f.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 0edc762e63801b92b5a931c10446287f9b3d6406 Mon Sep 17 00:00:00 2001 -From: Andrew Beekhof -Date: Wed, 6 Jan 2016 15:15:24 +1100 -Subject: [PATCH 107/108] Feature: PE: Honor the shutdown transient attributes - for remote nodes - ---- - lib/pengine/unpack.c | 11 +++++++++++ - 1 file changed, 11 insertions(+) - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 6a125b0..75d9dd8 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -1145,6 +1145,8 @@ unpack_remote_status(xmlNode * status, pe_working_set_t * data_set) - { - const 
char *id = NULL; - const char *uname = NULL; -+ const char *shutdown = NULL; -+ - GListPtr gIter = NULL; - - xmlNode *state = NULL; -@@ -1190,6 +1192,15 @@ unpack_remote_status(xmlNode * status, pe_working_set_t * data_set) - attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE); - add_node_attrs(attrs, this_node, TRUE, data_set); - -+ shutdown = g_hash_table_lookup(this_node->details->attrs, XML_CIB_ATTR_SHUTDOWN); -+ if (shutdown != NULL && safe_str_neq("0", shutdown)) { -+ resource_t *rsc = this_node->details->remote_rsc; -+ -+ crm_info("Node %s is shutting down", this_node->details->uname); -+ this_node->details->shutdown = TRUE; -+ rsc->next_role = RSC_ROLE_STOPPED; -+ } -+ - if (crm_is_true(g_hash_table_lookup(this_node->details->attrs, "standby"))) { - crm_info("Node %s is in standby-mode", this_node->details->uname); - this_node->details->standby = TRUE; --- -1.8.3.1 - diff --git a/SOURCES/0108-Feature-crmd-Set-the-shutdown-transient-attribute-in.patch b/SOURCES/0108-Feature-crmd-Set-the-shutdown-transient-attribute-in.patch deleted file mode 100644 index d4f7292..0000000 --- a/SOURCES/0108-Feature-crmd-Set-the-shutdown-transient-attribute-in.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 0a883a90eeeee4c9b156023da693d4ff93a9d69a Mon Sep 17 00:00:00 2001 -From: Andrew Beekhof -Date: Wed, 6 Jan 2016 15:17:06 +1100 -Subject: [PATCH 108/108] Feature: crmd: Set the shutdown transient attribute - in response to LRMD_IPC_OP_SHUTDOWN_REQ from remote nodes - ---- - crmd/lrm_state.c | 22 ++++++++++++++-------- - 1 file changed, 14 insertions(+), 8 deletions(-) - -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index 5ee5b83..7833ebb 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -478,17 +478,23 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - CRM_CHECK(op != NULL, return); - CRM_CHECK(session != NULL, return); - -+ crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); -+ /* This is msg from remote ipc client going to real ipc server */ -+ - if (safe_str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ)) { -- crm_warn("Graceful proxy shutdown not yet supported"); -- /* TODO: uncomment this, then put node in standby: */ -- /* remote_proxy_ack_shutdown(lrmd); */ -- return; -- } -+ char *now_s = NULL; -+ time_t now = time(NULL); - -- crm_element_value_int(msg, F_LRMD_IPC_MSG_ID, &msg_id); -+ crm_warn("Graceful proxy shutdown of %s not yet tested", lrm_state->node_name); - -- /* This is msg from remote ipc client going to real ipc server */ -- if (safe_str_eq(op, LRMD_IPC_OP_NEW)) { -+ now_s = crm_itoa(now); -+ update_attrd(lrm_state->node_name, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, FALSE); -+ free(now_s); -+ -+ remote_proxy_ack_shutdown(lrmd); -+ return; -+ -+ } else if (safe_str_eq(op, LRMD_IPC_OP_NEW)) { - const char *channel = crm_element_value(msg, F_LRMD_IPC_IPC_SERVER); - - CRM_CHECK(channel != NULL, return); --- -1.8.3.1 - diff --git a/SOURCES/0109-Fix-attrd-Hook-up-the-client-name-so-we-can-track-re.patch b/SOURCES/0109-Fix-attrd-Hook-up-the-client-name-so-we-can-track-re.patch deleted file mode 100644 index d13d0c4..0000000 --- a/SOURCES/0109-Fix-attrd-Hook-up-the-client-name-so-we-can-track-re.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 6968a8b8c48a63af8c813ed47652662cbce837be Mon Sep 17 00:00:00 2001 -From: Andrew Beekhof -Date: Thu, 7 Jan 2016 11:28:14 +1100 -Subject: [PATCH] Fix: attrd: Hook up the client name so we can track requests - ---- - attrd/main.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/attrd/main.c b/attrd/main.c -index 
069e9fa..0198396 100644 ---- a/attrd/main.c -+++ b/attrd/main.c -@@ -226,6 +226,11 @@ attrd_ipc_dispatch(qb_ipcs_connection_t * c, void *data, size_t size) - - op = crm_element_value(xml, F_ATTRD_TASK); - -+ if (client->name == NULL) { -+ const char *value = crm_element_value(xml, F_ORIG); -+ client->name = crm_strdup_printf("%s.%d", value?value:"unknown", client->pid); -+ } -+ - if (safe_str_eq(op, ATTRD_OP_PEER_REMOVE)) { - attrd_send_ack(client, id, flags); - attrd_client_peer_remove(client->name, xml); --- -1.8.3.1 - diff --git a/SOURCES/011-resend-shutdown.patch b/SOURCES/011-resend-shutdown.patch new file mode 100644 index 0000000..7993933 --- /dev/null +++ b/SOURCES/011-resend-shutdown.patch @@ -0,0 +1,82 @@ +From 19d273ae5831f40e1816b138a56260ddd3448a4e Mon Sep 17 00:00:00 2001 +From: Andrew Beekhof +Date: Fri, 12 Aug 2016 10:03:37 +1000 +Subject: [PATCH] Fix: crmd: Resend the shutdown request if the DC forgets + +As seen in: + https://bugzilla.redhat.com/show_bug.cgi?id=1310486 + +Scenario needs very poor timing and some bad luck: + +1. Start a node wait for it to become the DC +2. Start a second node +3. Tell the second node to stop while it is in the process of +negotiating with the DC. + Specifically just after do_cl_join_finalize_respond() is called on +the second node. +4. Cross your fingers that somehow the shutdown=0 update makes it to +attrd _after_ the DC sets shutdown=${large int} + +Under these conditions, the request to shut down will be lost and the DC +will feel free to start services on the second node. +--- + crmd/lrm.c | 36 ++++++++++++++++++++++++------------ + 1 file changed, 24 insertions(+), 12 deletions(-) + +diff --git a/crmd/lrm.c b/crmd/lrm.c +index c987e49..3e32f33 100644 +--- a/crmd/lrm.c ++++ b/crmd/lrm.c +@@ -2025,6 +2025,7 @@ do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operat + fsa_data_t *msg_data = NULL; + const char *transition = NULL; + gboolean stop_recurring = FALSE; ++ bool send_nack = FALSE; + + CRM_CHECK(rsc != NULL, return); + CRM_CHECK(operation != NULL, return); +@@ -2075,18 +2076,29 @@ do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operat + /* now do the op */ + crm_info("Performing key=%s op=%s_%s_%d", transition, rsc->id, operation, op->interval); + +- if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE && fsa_state != S_TRANSITION_ENGINE) { +- if (safe_str_neq(operation, "fail") +- && safe_str_neq(operation, CRMD_ACTION_STOP)) { +- crm_info("Discarding attempt to perform action %s on %s in state %s", +- operation, rsc->id, fsa_state2string(fsa_state)); +- op->rc = CRM_DIRECT_NACK_RC; +- op->op_status = PCMK_LRM_OP_ERROR; +- send_direct_ack(NULL, NULL, rsc, op, rsc->id); +- lrmd_free_event(op); +- free(op_id); +- return; +- } ++ if (is_set(fsa_input_register, R_SHUTDOWN) && safe_str_eq(operation, RSC_START)) { ++ register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL); ++ send_nack = TRUE; ++ ++ } else if (fsa_state != S_NOT_DC ++ && fsa_state != S_POLICY_ENGINE /* Recalculating */ ++ && fsa_state != S_TRANSITION_ENGINE ++ && safe_str_neq(operation, "fail") ++ && safe_str_neq(operation, CRMD_ACTION_STOP)) { ++ send_nack = TRUE; ++ } ++ ++ if(send_nack) { ++ crm_notice("Discarding attempt to perform action %s on %s in state %s (shutdown=%s)", ++ operation, rsc->id, fsa_state2string(fsa_state), ++ is_set(fsa_input_register, R_SHUTDOWN)?"true":"false"); ++ ++ op->rc = CRM_DIRECT_NACK_RC; ++ op->op_status = PCMK_LRM_OP_ERROR; ++ send_direct_ack(NULL, NULL, rsc, op, rsc->id); ++ 
lrmd_free_event(op); ++ free(op_id); ++ return; + } + + op_id = generate_op_key(rsc->id, op->op_type, op->interval); +-- +1.8.3.1 + diff --git a/SOURCES/0111-Log-crmd-Graceful-proxy-shutdown-is-now-tested.patch b/SOURCES/0111-Log-crmd-Graceful-proxy-shutdown-is-now-tested.patch deleted file mode 100644 index 227a7f5..0000000 --- a/SOURCES/0111-Log-crmd-Graceful-proxy-shutdown-is-now-tested.patch +++ /dev/null @@ -1,25 +0,0 @@ -From e2c7f8d987f090a3bb2ba3ec0e007a6dbf138ad2 Mon Sep 17 00:00:00 2001 -From: Andrew Beekhof -Date: Mon, 11 Jan 2016 08:28:24 +1100 -Subject: [PATCH] Log: crmd: Graceful proxy shutdown is now tested - ---- - crmd/lrm_state.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index 7833ebb..62e1c76 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -485,7 +485,7 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - char *now_s = NULL; - time_t now = time(NULL); - -- crm_warn("Graceful proxy shutdown of %s not yet tested", lrm_state->node_name); -+ crm_notice("Graceful proxy shutdown of %s", lrm_state->node_name); - - now_s = crm_itoa(now); - update_attrd(lrm_state->node_name, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, FALSE); --- -1.8.3.1 - diff --git a/SOURCES/0112-Fix-crmd-set-remote-flag.patch b/SOURCES/0112-Fix-crmd-set-remote-flag.patch deleted file mode 100644 index a40862a..0000000 --- a/SOURCES/0112-Fix-crmd-set-remote-flag.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 615b0784516933106a8446272bc3c043b0a0d50a Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 26 Jan 2016 14:04:30 -0600 -Subject: [PATCH] Fix: crmd: set remote flag when gracefully shutting down - remote nodes - ---- - crmd/lrm_state.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index 62e1c76..7ea4e8a 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -488,7 +488,7 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - crm_notice("Graceful proxy shutdown of %s", lrm_state->node_name); - - now_s = crm_itoa(now); -- update_attrd(lrm_state->node_name, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, FALSE); -+ update_attrd(lrm_state->node_name, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, TRUE); - free(now_s); - - remote_proxy_ack_shutdown(lrmd); --- -1.8.3.1 - diff --git a/SOURCES/0113-Fix-attrd-correct-peer-cache.patch b/SOURCES/0113-Fix-attrd-correct-peer-cache.patch deleted file mode 100644 index e8d7343..0000000 --- a/SOURCES/0113-Fix-attrd-correct-peer-cache.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 942efa4e8edcfdbdce42505c30c18cacd1d8fff0 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Tue, 26 Jan 2016 15:55:46 -0600 -Subject: [PATCH] Fix: attrd: ensure remote nodes are in correct peer cache - -If attrd receives an update for an unknown node name, it assumes the unknown -node is a cluster node, and adds it to the cluster peer cache. - -Previously, if the name was later used for a remote node, that would prevent -its attributes from being written to the CIB. Now, when an attribute is -received for a remote node, attrd will purge any inactive cluster peer cache -entry before adding the node to the remote peer cache. 
---- - attrd/commands.c | 22 +++++++++++++++++----- - 1 file changed, 17 insertions(+), 5 deletions(-) - -diff --git a/attrd/commands.c b/attrd/commands.c -index 28e4a81..b2cc83a 100644 ---- a/attrd/commands.c -+++ b/attrd/commands.c -@@ -634,6 +634,22 @@ static attribute_value_t * - attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml) - { - attribute_value_t *v = g_hash_table_lookup(values, host); -+ int is_remote = 0; -+ -+ crm_element_value_int(xml, F_ATTRD_IS_REMOTE, &is_remote); -+ if (is_remote) { -+ /* If we previously assumed this node was an unseen cluster node, -+ * remove its entry from the cluster peer cache. -+ */ -+ crm_node_t *dup = crm_find_peer(0, host); -+ -+ if (dup && (dup->uuid == NULL)) { -+ reap_crm_member(0, host); -+ } -+ -+ /* Ensure this host is in the remote peer cache */ -+ crm_remote_peer_cache_add(host); -+ } - - if (v == NULL) { - v = calloc(1, sizeof(attribute_value_t)); -@@ -642,11 +658,7 @@ attrd_lookup_or_create_value(GHashTable *values, const char *host, xmlNode *xml) - v->nodename = strdup(host); - CRM_ASSERT(v->nodename != NULL); - -- crm_element_value_int(xml, F_ATTRD_IS_REMOTE, &v->is_remote); -- if (v->is_remote == TRUE) { -- crm_remote_peer_cache_add(host); -- } -- -+ v->is_remote = is_remote; - g_hash_table_replace(values, v->nodename, v); - } - return(v); --- -1.8.3.1 - diff --git a/SOURCES/0114-clear-remote-node-transient.patch b/SOURCES/0114-clear-remote-node-transient.patch deleted file mode 100644 index 9555dbf..0000000 --- a/SOURCES/0114-clear-remote-node-transient.patch +++ /dev/null @@ -1,12 +0,0 @@ ---- a/crmd/remote_lrmd_ra.c 2016-06-11 14:59:00.658320677 -0500 -+++ b/crmd/remote_lrmd_ra.c 2016-06-11 15:18:40.284860853 -0500 -@@ -484,6 +484,9 @@ - update_attrd_remote_node_removed(lrm_state->node_name, NULL); - /* delete pending ops when ever the remote connection is intentionally stopped */ - g_hash_table_remove_all(lrm_state->pending_ops); -+ /* Purge node's operation history and transient attributes from CIB */ -+ erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_quorum_override); -+ erase_status_tag(lrm_state->node_name, XML_TAG_TRANSIENT_NODEATTRS, cib_quorum_override); - } else { - /* we no longer hold the history if this connection has been migrated */ - lrm_state_reset_tables(lrm_state); diff --git a/SOURCES/0115-crm_resource-restart-fixes.patch b/SOURCES/0115-crm_resource-restart-fixes.patch deleted file mode 100644 index 75575ad..0000000 --- a/SOURCES/0115-crm_resource-restart-fixes.patch +++ /dev/null @@ -1,383 +0,0 @@ -From 020ff3810c12bbc6ef6ec212958871bb36b5859a Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Sun, 12 Jun 2016 13:37:59 -0500 -Subject: [PATCH 1/5] Fix: tools: correctly count starting resources when doing - crm_resource --restart - ---- - tools/crm_resource_runtime.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 82856ad..b246e34 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1178,7 +1178,7 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - dump_list(list_delta, "Delta"); - } - -- crm_trace("%d (was %d) resources remaining", before, g_list_length(list_delta)); -+ crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before); - if(before == g_list_length(list_delta)) { - /* aborted during stop phase, print the contents of list_delta */ - fprintf(stderr, "Could not complete shutdown of %s, 
%d resources remaining\n", rsc_id, g_list_length(list_delta)); -@@ -1209,6 +1209,7 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - - step_timeout_s = timeout / sleep_interval; - while(g_list_length(list_delta) > 0) { -+ before = g_list_length(list_delta); - if(timeout_ms == 0) { - step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval; - } -@@ -1241,7 +1242,7 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - goto failure; - } - -- } while(g_list_length(list_delta) > 0); -+ } - - free(rsc_id); - return pcmk_ok; --- -1.8.3.1 - - -From 06cc891dd16b1e1b8a004ed364a9f46c64127ffd Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Sun, 12 Jun 2016 15:05:04 -0500 -Subject: [PATCH 2/5] Fix: tools: remember any existing target-role when doing - crm_resource --restart - ---- - tools/crm_resource_runtime.c | 26 +++++++++++++++++++++++++- - 1 file changed, 25 insertions(+), 1 deletion(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index b246e34..1567559 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1070,6 +1070,7 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - - bool is_clone = FALSE; - char *rsc_id = NULL; -+ char *orig_target_role = NULL; - - GList *list_delta = NULL; - GList *target_active = NULL; -@@ -1088,7 +1089,9 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - return -ENXIO; - } - -+ /* We might set the target-role meta-attribute */ - attr_set_type = XML_TAG_META_SETS; -+ - rsc_id = strdup(rsc->id); - if(rsc->variant > pe_group) { - is_clone = TRUE; -@@ -1127,10 +1130,20 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - dump_list(current_active, "Origin"); - - if(is_clone && host) { -+ /* Stop the clone instance by banning it from the host */ - BE_QUIET = TRUE; - rc = cli_resource_ban(rsc_id, host, NULL, cib); - - } else { -+ /* Stop the resource by setting target-role to Stopped. -+ * Remember any existing target-role so we can restore it later -+ * (though it only makes any difference if it's Slave). 
-+ */ -+ char *lookup_id = clone_strip(rsc->id); -+ -+ find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL, -+ NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role); -+ free(lookup_id); - rc = cli_resource_update_attribute(rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, RSC_STOPPED, FALSE, cib, &data_set); - } - if(rc != pcmk_ok) { -@@ -1192,6 +1205,13 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - if(is_clone && host) { - rc = cli_resource_clear(rsc_id, host, NULL, cib); - -+ } else if (orig_target_role) { -+ rc = cli_resource_update_attribute(rsc_id, NULL, NULL, -+ XML_RSC_ATTR_TARGET_ROLE, -+ orig_target_role, FALSE, cib, -+ &data_set); -+ free(orig_target_role); -+ orig_target_role = NULL; - } else { - rc = cli_resource_delete_attribute(rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, &data_set); - } -@@ -1250,7 +1270,11 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - failure: - if(is_clone && host) { - cli_resource_clear(rsc_id, host, NULL, cib); -- -+ } else if (orig_target_role) { -+ cli_resource_update_attribute(rsc_id, NULL, NULL, -+ XML_RSC_ATTR_TARGET_ROLE, -+ orig_target_role, FALSE, cib, &data_set); -+ free(orig_target_role); - } else { - cli_resource_delete_attribute(rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, &data_set); - } --- -1.8.3.1 - - -From aaed9569272a5d4704aede32d9d1cf5d76085e6b Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Sun, 12 Jun 2016 15:36:56 -0500 -Subject: [PATCH 3/5] Fix: tools: avoid memory leaks in crm_resource --restart - ---- - tools/crm_resource_runtime.c | 39 +++++++++++++++++++++++++++++++++++++-- - 1 file changed, 37 insertions(+), 2 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 1567559..6126e3c 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1148,6 +1148,12 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - } - if(rc != pcmk_ok) { - fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc); -+ if (current_active) { -+ g_list_free_full(current_active, free); -+ } -+ if (restart_target_active) { -+ g_list_free_full(restart_target_active, free); -+ } - free(rsc_id); - return crm_exit(rc); - } -@@ -1185,7 +1191,11 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - goto failure; - } - -+ if (current_active) { -+ g_list_free_full(current_active, free); -+ } - current_active = get_active_resources(host, &data_set); -+ g_list_free(list_delta); - list_delta = subtract_lists(current_active, target_active); - dump_list(current_active, "Current"); - dump_list(list_delta, "Delta"); -@@ -1222,7 +1232,13 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - return crm_exit(rc); - } - -+ if (target_active) { -+ g_list_free_full(target_active, free); -+ } - target_active = restart_target_active; -+ if (list_delta) { -+ g_list_free(list_delta); -+ } - list_delta = subtract_lists(target_active, current_active); - fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta)); - display_list(list_delta, " * "); -@@ -1248,7 +1264,11 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - goto failure; - } - -+ if (current_active) { -+ g_list_free_full(current_active, free); -+ } - current_active = get_active_resources(host, &data_set); -+ g_list_free(list_delta); - list_delta = 
subtract_lists(target_active, current_active); - dump_list(current_active, "Current"); - dump_list(list_delta, "Delta"); -@@ -1264,8 +1284,8 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - - } - -- free(rsc_id); -- return pcmk_ok; -+ rc = pcmk_ok; -+ goto done; - - failure: - if(is_clone && host) { -@@ -1278,6 +1298,21 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - } else { - cli_resource_delete_attribute(rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, &data_set); - } -+ -+done: -+ if (list_delta) { -+ g_list_free(list_delta); -+ } -+ if (current_active) { -+ g_list_free_full(current_active, free); -+ } -+ if (target_active && (target_active != restart_target_active)) { -+ g_list_free_full(target_active, free); -+ } -+ if (restart_target_active) { -+ g_list_free_full(restart_target_active, free); -+ } -+ cleanup_alloc_calculations(&data_set); - free(rsc_id); - return rc; - } --- -1.8.3.1 - - -From 847723f7175a0f008eeebe2d3b333fea4570a228 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Sun, 12 Jun 2016 16:10:00 -0500 -Subject: [PATCH 4/5] Fix: tools: don't assume all resources restart on same - node with crm_resource --restart - ---- - tools/crm_resource_runtime.c | 14 +++++++++++--- - 1 file changed, 11 insertions(+), 3 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 6126e3c..753ba2d 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -1044,6 +1044,9 @@ max_delay_in(pe_working_set_t * data_set, GList *resources) - return 5 + (max_delay / 1000); - } - -+#define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \ -+ (resource_is_running_on((r), (h)) == FALSE)) -+ - /*! - * \internal - * \brief Restart a resource (on a particular host if requested). -@@ -1244,14 +1247,15 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - display_list(list_delta, " * "); - - step_timeout_s = timeout / sleep_interval; -- while(g_list_length(list_delta) > 0) { -+ while (waiting_for_starts(list_delta, rsc, host)) { - before = g_list_length(list_delta); - if(timeout_ms == 0) { - step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval; - } - - /* We probably don't need the entire step timeout */ -- for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) { -+ for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) { -+ - sleep(sleep_interval); - if(timeout) { - timeout -= sleep_interval; -@@ -1267,7 +1271,11 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - if (current_active) { - g_list_free_full(current_active, free); - } -- current_active = get_active_resources(host, &data_set); -+ -+ /* It's OK if dependent resources moved to a different node, -+ * so we check active resources on all nodes. 
-+ */ -+ current_active = get_active_resources(NULL, &data_set); - g_list_free(list_delta); - list_delta = subtract_lists(target_active, current_active); - dump_list(current_active, "Current"); --- -1.8.3.1 - - -From f5afdc1badbe38781d049c86e8a2f51b17636072 Mon Sep 17 00:00:00 2001 -From: Ken Gaillot -Date: Mon, 13 Jun 2016 16:12:28 -0500 -Subject: [PATCH 5/5] Fix: tools: properly handle crm_resource --restart with a - resource in a group - ---- - tools/crm_resource_runtime.c | 34 +++++++++++++++++++++++++--------- - 1 file changed, 25 insertions(+), 9 deletions(-) - -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 753ba2d..b714a96 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -817,19 +817,35 @@ static bool resource_is_running_on(resource_t *rsc, const char *host) - return found; - } - --static GList *get_active_resources(const char *host, pe_working_set_t *data_set) -+/*! -+ * \internal -+ * \brief Create a list of all resources active on host from a given list -+ * -+ * \param[in] host Name of host to check whether resources are active -+ * \param[in] rsc_list List of resources to check -+ * -+ * \return New list of resources from list that are active on host -+ */ -+static GList * -+get_active_resources(const char *host, GList *rsc_list) - { - GList *rIter = NULL; - GList *active = NULL; - -- for (rIter = data_set->resources; rIter != NULL; rIter = rIter->next) { -+ for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) { - resource_t *rsc = (resource_t *) rIter->data; - -- if(resource_is_running_on(rsc, host)) { -+ /* Expand groups to their members, because if we're restarting a member -+ * other than the first, we can't otherwise tell which resources are -+ * stopping and starting. -+ */ -+ if (rsc->variant == pe_group) { -+ active = g_list_concat(active, -+ get_active_resources(host, rsc->children)); -+ } else if (resource_is_running_on(rsc, host)) { - active = g_list_append(active, strdup(rsc->id)); - } - } -- - return active; - } - -@@ -1127,8 +1143,8 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - return rc; - } - -- restart_target_active = get_active_resources(host, &data_set); -- current_active = get_active_resources(host, &data_set); -+ restart_target_active = get_active_resources(host, data_set.resources); -+ current_active = get_active_resources(host, data_set.resources); - - dump_list(current_active, "Origin"); - -@@ -1167,7 +1183,7 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - goto failure; - } - -- target_active = get_active_resources(host, &data_set); -+ target_active = get_active_resources(host, data_set.resources); - dump_list(target_active, "Target"); - - list_delta = subtract_lists(current_active, target_active); -@@ -1197,7 +1213,7 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - if (current_active) { - g_list_free_full(current_active, free); - } -- current_active = get_active_resources(host, &data_set); -+ current_active = get_active_resources(host, data_set.resources); - g_list_free(list_delta); - list_delta = subtract_lists(current_active, target_active); - dump_list(current_active, "Current"); -@@ -1275,7 +1291,7 @@ cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * - /* It's OK if dependent resources moved to a different node, - * so we check active resources on all nodes. 
- */ -- current_active = get_active_resources(NULL, &data_set); -+ current_active = get_active_resources(NULL, data_set.resources); - g_list_free(list_delta); - list_delta = subtract_lists(target_active, current_active); - dump_list(current_active, "Current"); --- -1.8.3.1 - diff --git a/SOURCES/0116-unrunnable-clones.patch b/SOURCES/0116-unrunnable-clones.patch deleted file mode 100644 index d95baed..0000000 --- a/SOURCES/0116-unrunnable-clones.patch +++ /dev/null @@ -1,2802 +0,0 @@ -From 6951b7e5ea5cdee4dba890159d1efca61bb1101d Mon Sep 17 00:00:00 2001 -From: Andrew Beekhof -Date: Fri, 17 Jun 2016 10:55:18 +1000 -Subject: [PATCH 1/2] Fix: PE: Correctly update the dependant actions of - un-runnable clones - ---- - pengine/clone.c | 18 +++++++++++++----- - 1 file changed, 13 insertions(+), 5 deletions(-) - -diff --git a/pengine/clone.c b/pengine/clone.c -index fd47018..74b4ea6 100644 ---- a/pengine/clone.c -+++ b/pengine/clone.c -@@ -927,9 +927,11 @@ clone_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) - } - } - --static void -+static bool - assign_node(resource_t * rsc, node_t * node, gboolean force) - { -+ bool changed = FALSE; -+ - if (rsc->children) { - - GListPtr gIter = rsc->children; -@@ -937,12 +939,17 @@ assign_node(resource_t * rsc, node_t * node, gboolean force) - for (; gIter != NULL; gIter = gIter->next) { - resource_t *child_rsc = (resource_t *) gIter->data; - -- native_assign_node(child_rsc, NULL, node, force); -+ changed |= native_assign_node(child_rsc, NULL, node, force); - } - -- return; -+ return changed; -+ } -+ if (rsc->allocated_to != NULL) { -+ changed = true; - } -+ - native_assign_node(rsc, NULL, node, force); -+ return changed; - } - - static resource_t * -@@ -1264,8 +1271,9 @@ clone_update_actions_interleave(action_t * first, action_t * then, node_t * node - */ - if (type & (pe_order_runnable_left | pe_order_implies_then) /* Mandatory */ ) { - pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id); -- assign_node(then_child, NULL, TRUE); -- /* TODO - set changed correctly? 
*/ -+ if(assign_node(then_child, NULL, TRUE)) { -+ changed |= pe_graph_updated_then; -+ } - } - - } else { --- -1.8.3.1 - - -From 80be44c2f6920dc82476f2461a3c897613608d02 Mon Sep 17 00:00:00 2001 -From: Andrew Beekhof -Date: Fri, 17 Jun 2016 10:58:09 +1000 -Subject: [PATCH 2/2] Test: PE: Correctly update the dependant actions of - un-runnable clones - ---- - pengine/regression.sh | 1 + - pengine/test10/unrunnable-2.dot | 495 +++++++++++++++ - pengine/test10/unrunnable-2.exp | 1 + - pengine/test10/unrunnable-2.scores | 814 ++++++++++++++++++++++++ - pengine/test10/unrunnable-2.summary | 175 ++++++ - pengine/test10/unrunnable-2.xml | 1189 +++++++++++++++++++++++++++++++++++ - 6 files changed, 2675 insertions(+) - create mode 100644 pengine/test10/unrunnable-2.dot - create mode 100644 pengine/test10/unrunnable-2.exp - create mode 100644 pengine/test10/unrunnable-2.scores - create mode 100644 pengine/test10/unrunnable-2.summary - create mode 100644 pengine/test10/unrunnable-2.xml - -diff --git a/pengine/regression.sh b/pengine/regression.sh -index 8a2864c..f86d0f1 100755 ---- a/pengine/regression.sh -+++ b/pengine/regression.sh -@@ -484,6 +484,7 @@ do_test 1360 "OSDL #1360 - Clone stickiness" - do_test 1484 "OSDL #1484 - on_fail=stop" - do_test 1494 "OSDL #1494 - Clone stability" - do_test unrunnable-1 "Unrunnable" -+do_test unrunnable-2 "Unrunnable 2" - do_test stonith-0 "Stonith loop - 1" - do_test stonith-1 "Stonith loop - 2" - do_test stonith-2 "Stonith loop - 3" -diff --git a/pengine/test10/unrunnable-2.dot b/pengine/test10/unrunnable-2.dot -new file mode 100644 -index 0000000..bfdabe8 ---- /dev/null -+++ b/pengine/test10/unrunnable-2.dot -@@ -0,0 +1,495 @@ -+digraph "g" { -+"delay-clone_running_0" -> "openstack-aodh-evaluator-clone_start_0" [ style = dashed] -+"delay-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"delay-clone_start_0" -> "delay-clone_running_0" [ style = dashed] -+"delay-clone_start_0" -> "delay:1_start_0 overcloud-controller-0" [ style = dashed] -+"delay-clone_start_0" -> "delay:2_start_0 overcloud-controller-1" [ style = dashed] -+"delay-clone_start_0" -> "delay_start_0 overcloud-controller-2" [ style = dashed] -+"delay-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"delay:1_monitor_10000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"delay:1_start_0 overcloud-controller-0" -> "delay-clone_running_0" [ style = dashed] -+"delay:1_start_0 overcloud-controller-0" -> "delay:1_monitor_10000 overcloud-controller-0" [ style = dashed] -+"delay:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"delay:2_monitor_10000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"delay:2_start_0 overcloud-controller-1" -> "delay-clone_running_0" [ style = dashed] -+"delay:2_start_0 overcloud-controller-1" -> "delay:2_monitor_10000 overcloud-controller-1" [ style = dashed] -+"delay:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"delay_monitor_10000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"delay_start_0 overcloud-controller-2" -> "delay-clone_running_0" [ style = dashed] -+"delay_start_0 overcloud-controller-2" -> "delay_monitor_10000 overcloud-controller-2" [ style = dashed] -+"delay_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-dhcp-agent-clone_running_0" -> "neutron-l3-agent-clone_start_0" [ style = dashed] -+"neutron-dhcp-agent-clone_running_0" [ style=dashed 
color="red" fontcolor="orange"] -+"neutron-dhcp-agent-clone_start_0" -> "neutron-dhcp-agent-clone_running_0" [ style = dashed] -+"neutron-dhcp-agent-clone_start_0" -> "neutron-dhcp-agent:1_start_0 overcloud-controller-0" [ style = dashed] -+"neutron-dhcp-agent-clone_start_0" -> "neutron-dhcp-agent:2_start_0 overcloud-controller-1" [ style = dashed] -+"neutron-dhcp-agent-clone_start_0" -> "neutron-dhcp-agent_start_0 overcloud-controller-2" [ style = dashed] -+"neutron-dhcp-agent-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-dhcp-agent:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-dhcp-agent:1_start_0 overcloud-controller-0" -> "neutron-dhcp-agent-clone_running_0" [ style = dashed] -+"neutron-dhcp-agent:1_start_0 overcloud-controller-0" -> "neutron-dhcp-agent:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"neutron-dhcp-agent:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-dhcp-agent:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-dhcp-agent:2_start_0 overcloud-controller-1" -> "neutron-dhcp-agent-clone_running_0" [ style = dashed] -+"neutron-dhcp-agent:2_start_0 overcloud-controller-1" -> "neutron-dhcp-agent:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"neutron-dhcp-agent:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-dhcp-agent_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-dhcp-agent_start_0 overcloud-controller-2" -> "neutron-dhcp-agent-clone_running_0" [ style = dashed] -+"neutron-dhcp-agent_start_0 overcloud-controller-2" -> "neutron-dhcp-agent_monitor_60000 overcloud-controller-2" [ style = dashed] -+"neutron-dhcp-agent_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-l3-agent-clone_running_0" -> "neutron-metadata-agent-clone_start_0" [ style = dashed] -+"neutron-l3-agent-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-l3-agent-clone_start_0" -> "neutron-l3-agent-clone_running_0" [ style = dashed] -+"neutron-l3-agent-clone_start_0" -> "neutron-l3-agent:1_start_0 overcloud-controller-0" [ style = dashed] -+"neutron-l3-agent-clone_start_0" -> "neutron-l3-agent:2_start_0 overcloud-controller-1" [ style = dashed] -+"neutron-l3-agent-clone_start_0" -> "neutron-l3-agent_start_0 overcloud-controller-2" [ style = dashed] -+"neutron-l3-agent-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-l3-agent:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-l3-agent:1_start_0 overcloud-controller-0" -> "neutron-l3-agent-clone_running_0" [ style = dashed] -+"neutron-l3-agent:1_start_0 overcloud-controller-0" -> "neutron-l3-agent:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"neutron-l3-agent:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-l3-agent:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-l3-agent:2_start_0 overcloud-controller-1" -> "neutron-l3-agent-clone_running_0" [ style = dashed] -+"neutron-l3-agent:2_start_0 overcloud-controller-1" -> "neutron-l3-agent:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"neutron-l3-agent:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-l3-agent_monitor_60000 overcloud-controller-2" [ 
style=dashed color="red" fontcolor="black"] -+"neutron-l3-agent_start_0 overcloud-controller-2" -> "neutron-l3-agent-clone_running_0" [ style = dashed] -+"neutron-l3-agent_start_0 overcloud-controller-2" -> "neutron-l3-agent_monitor_60000 overcloud-controller-2" [ style = dashed] -+"neutron-l3-agent_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-metadata-agent-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-metadata-agent-clone_start_0" -> "neutron-metadata-agent-clone_running_0" [ style = dashed] -+"neutron-metadata-agent-clone_start_0" -> "neutron-metadata-agent:1_start_0 overcloud-controller-1" [ style = dashed] -+"neutron-metadata-agent-clone_start_0" -> "neutron-metadata-agent:2_start_0 overcloud-controller-2" [ style = dashed] -+"neutron-metadata-agent-clone_start_0" -> "neutron-metadata-agent_start_0 overcloud-controller-0" [ style = dashed] -+"neutron-metadata-agent-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-metadata-agent:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-metadata-agent:1_start_0 overcloud-controller-1" -> "neutron-metadata-agent-clone_running_0" [ style = dashed] -+"neutron-metadata-agent:1_start_0 overcloud-controller-1" -> "neutron-metadata-agent:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"neutron-metadata-agent:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-metadata-agent:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-metadata-agent:2_start_0 overcloud-controller-2" -> "neutron-metadata-agent-clone_running_0" [ style = dashed] -+"neutron-metadata-agent:2_start_0 overcloud-controller-2" -> "neutron-metadata-agent:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"neutron-metadata-agent:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-metadata-agent_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-metadata-agent_start_0 overcloud-controller-0" -> "neutron-metadata-agent-clone_running_0" [ style = dashed] -+"neutron-metadata-agent_start_0 overcloud-controller-0" -> "neutron-metadata-agent_monitor_60000 overcloud-controller-0" [ style = dashed] -+"neutron-metadata-agent_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-openvswitch-agent-clone_running_0" -> "neutron-dhcp-agent-clone_start_0" [ style = dashed] -+"neutron-openvswitch-agent-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-openvswitch-agent-clone_start_0" -> "neutron-openvswitch-agent-clone_running_0" [ style = dashed] -+"neutron-openvswitch-agent-clone_start_0" -> "neutron-openvswitch-agent:1_start_0 overcloud-controller-0" [ style = dashed] -+"neutron-openvswitch-agent-clone_start_0" -> "neutron-openvswitch-agent:2_start_0 overcloud-controller-1" [ style = dashed] -+"neutron-openvswitch-agent-clone_start_0" -> "neutron-openvswitch-agent_start_0 overcloud-controller-2" [ style = dashed] -+"neutron-openvswitch-agent-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-openvswitch-agent:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-openvswitch-agent:1_start_0 overcloud-controller-0" -> "neutron-openvswitch-agent-clone_running_0" [ style = dashed] -+"neutron-openvswitch-agent:1_start_0 overcloud-controller-0" -> 
"neutron-openvswitch-agent:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"neutron-openvswitch-agent:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-openvswitch-agent:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-openvswitch-agent:2_start_0 overcloud-controller-1" -> "neutron-openvswitch-agent-clone_running_0" [ style = dashed] -+"neutron-openvswitch-agent:2_start_0 overcloud-controller-1" -> "neutron-openvswitch-agent:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"neutron-openvswitch-agent:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-openvswitch-agent_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-openvswitch-agent_start_0 overcloud-controller-2" -> "neutron-openvswitch-agent-clone_running_0" [ style = dashed] -+"neutron-openvswitch-agent_start_0 overcloud-controller-2" -> "neutron-openvswitch-agent_monitor_60000 overcloud-controller-2" [ style = dashed] -+"neutron-openvswitch-agent_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-server-clone_running_0" -> "neutron-openvswitch-agent-clone_start_0" [ style = dashed] -+"neutron-server-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-server-clone_start_0" -> "neutron-server-clone_running_0" [ style = dashed] -+"neutron-server-clone_start_0" -> "neutron-server:1_start_0 overcloud-controller-1" [ style = dashed] -+"neutron-server-clone_start_0" -> "neutron-server:2_start_0 overcloud-controller-2" [ style = dashed] -+"neutron-server-clone_start_0" -> "neutron-server_start_0 overcloud-controller-0" [ style = dashed] -+"neutron-server-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"neutron-server:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-server:1_start_0 overcloud-controller-1" -> "neutron-server-clone_running_0" [ style = dashed] -+"neutron-server:1_start_0 overcloud-controller-1" -> "neutron-server:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"neutron-server:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"neutron-server:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-server:2_start_0 overcloud-controller-2" -> "neutron-server-clone_running_0" [ style = dashed] -+"neutron-server:2_start_0 overcloud-controller-2" -> "neutron-server:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"neutron-server:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"neutron-server_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"neutron-server_start_0 overcloud-controller-0" -> "neutron-server-clone_running_0" [ style = dashed] -+"neutron-server_start_0 overcloud-controller-0" -> "neutron-server_monitor_60000 overcloud-controller-0" [ style = dashed] -+"neutron-server_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-evaluator-clone_running_0" -> "openstack-aodh-notifier-clone_start_0" [ style = dashed] -+"openstack-aodh-evaluator-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-aodh-evaluator-clone_start_0" -> "openstack-aodh-evaluator-clone_running_0" [ style = dashed] -+"openstack-aodh-evaluator-clone_start_0" -> "openstack-aodh-evaluator:1_start_0 
overcloud-controller-0" [ style = dashed] -+"openstack-aodh-evaluator-clone_start_0" -> "openstack-aodh-evaluator:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-aodh-evaluator-clone_start_0" -> "openstack-aodh-evaluator_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-aodh-evaluator-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-aodh-evaluator:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-evaluator:1_start_0 overcloud-controller-0" -> "openstack-aodh-evaluator-clone_running_0" [ style = dashed] -+"openstack-aodh-evaluator:1_start_0 overcloud-controller-0" -> "openstack-aodh-evaluator:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-aodh-evaluator:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-evaluator:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-evaluator:2_start_0 overcloud-controller-1" -> "openstack-aodh-evaluator-clone_running_0" [ style = dashed] -+"openstack-aodh-evaluator:2_start_0 overcloud-controller-1" -> "openstack-aodh-evaluator:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-aodh-evaluator:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-evaluator_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-evaluator_start_0 overcloud-controller-2" -> "openstack-aodh-evaluator-clone_running_0" [ style = dashed] -+"openstack-aodh-evaluator_start_0 overcloud-controller-2" -> "openstack-aodh-evaluator_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-aodh-evaluator_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-notifier-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-aodh-notifier-clone_start_0" -> "openstack-aodh-notifier-clone_running_0" [ style = dashed] -+"openstack-aodh-notifier-clone_start_0" -> "openstack-aodh-notifier:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-aodh-notifier-clone_start_0" -> "openstack-aodh-notifier:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-aodh-notifier-clone_start_0" -> "openstack-aodh-notifier_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-aodh-notifier-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-aodh-notifier:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-notifier:1_start_0 overcloud-controller-1" -> "openstack-aodh-notifier-clone_running_0" [ style = dashed] -+"openstack-aodh-notifier:1_start_0 overcloud-controller-1" -> "openstack-aodh-notifier:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-aodh-notifier:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-notifier:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-notifier:2_start_0 overcloud-controller-2" -> "openstack-aodh-notifier-clone_running_0" [ style = dashed] -+"openstack-aodh-notifier:2_start_0 overcloud-controller-2" -> "openstack-aodh-notifier:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-aodh-notifier:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-notifier_monitor_60000 overcloud-controller-0" 
[ style=dashed color="red" fontcolor="black"] -+"openstack-aodh-notifier_start_0 overcloud-controller-0" -> "openstack-aodh-notifier-clone_running_0" [ style = dashed] -+"openstack-aodh-notifier_start_0 overcloud-controller-0" -> "openstack-aodh-notifier_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-aodh-notifier_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-api-clone_running_0" -> "delay-clone_start_0" [ style = dashed] -+"openstack-ceilometer-api-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-ceilometer-api-clone_start_0" -> "openstack-ceilometer-api-clone_running_0" [ style = dashed] -+"openstack-ceilometer-api-clone_start_0" -> "openstack-ceilometer-api:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-ceilometer-api-clone_start_0" -> "openstack-ceilometer-api:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-ceilometer-api-clone_start_0" -> "openstack-ceilometer-api_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-ceilometer-api-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-ceilometer-api:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-api:1_start_0 overcloud-controller-0" -> "openstack-ceilometer-api-clone_running_0" [ style = dashed] -+"openstack-ceilometer-api:1_start_0 overcloud-controller-0" -> "openstack-ceilometer-api:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-ceilometer-api:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-api:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-api:2_start_0 overcloud-controller-1" -> "openstack-ceilometer-api-clone_running_0" [ style = dashed] -+"openstack-ceilometer-api:2_start_0 overcloud-controller-1" -> "openstack-ceilometer-api:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-ceilometer-api:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-api_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-api_start_0 overcloud-controller-2" -> "openstack-ceilometer-api-clone_running_0" [ style = dashed] -+"openstack-ceilometer-api_start_0 overcloud-controller-2" -> "openstack-ceilometer-api_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-ceilometer-api_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-central-clone_running_0" -> "openstack-ceilometer-collector-clone_start_0" [ style = dashed] -+"openstack-ceilometer-central-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-ceilometer-central-clone_start_0" -> "openstack-ceilometer-central-clone_running_0" [ style = dashed] -+"openstack-ceilometer-central-clone_start_0" -> "openstack-ceilometer-central:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-ceilometer-central-clone_start_0" -> "openstack-ceilometer-central:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-ceilometer-central-clone_start_0" -> "openstack-ceilometer-central_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-ceilometer-central-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-ceilometer-central:1_monitor_60000 overcloud-controller-1" [ style=dashed 
color="red" fontcolor="black"] -+"openstack-ceilometer-central:1_start_0 overcloud-controller-1" -> "openstack-ceilometer-central-clone_running_0" [ style = dashed] -+"openstack-ceilometer-central:1_start_0 overcloud-controller-1" -> "openstack-ceilometer-central:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-ceilometer-central:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-central:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-central:2_start_0 overcloud-controller-2" -> "openstack-ceilometer-central-clone_running_0" [ style = dashed] -+"openstack-ceilometer-central:2_start_0 overcloud-controller-2" -> "openstack-ceilometer-central:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-ceilometer-central:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-central_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-central_start_0 overcloud-controller-0" -> "openstack-ceilometer-central-clone_running_0" [ style = dashed] -+"openstack-ceilometer-central_start_0 overcloud-controller-0" -> "openstack-ceilometer-central_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-ceilometer-central_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-collector-clone_running_0" -> "openstack-ceilometer-api-clone_start_0" [ style = dashed] -+"openstack-ceilometer-collector-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-ceilometer-collector-clone_start_0" -> "openstack-ceilometer-collector-clone_running_0" [ style = dashed] -+"openstack-ceilometer-collector-clone_start_0" -> "openstack-ceilometer-collector:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-ceilometer-collector-clone_start_0" -> "openstack-ceilometer-collector:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-ceilometer-collector-clone_start_0" -> "openstack-ceilometer-collector_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-ceilometer-collector-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-ceilometer-collector:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-collector:1_start_0 overcloud-controller-0" -> "openstack-ceilometer-collector-clone_running_0" [ style = dashed] -+"openstack-ceilometer-collector:1_start_0 overcloud-controller-0" -> "openstack-ceilometer-collector:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-ceilometer-collector:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-collector:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-collector:2_start_0 overcloud-controller-1" -> "openstack-ceilometer-collector-clone_running_0" [ style = dashed] -+"openstack-ceilometer-collector:2_start_0 overcloud-controller-1" -> "openstack-ceilometer-collector:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-ceilometer-collector:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-collector_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-ceilometer-collector_start_0 overcloud-controller-2" -> 
"openstack-ceilometer-collector-clone_running_0" [ style = dashed] -+"openstack-ceilometer-collector_start_0 overcloud-controller-2" -> "openstack-ceilometer-collector_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-ceilometer-collector_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-api-clone_running_0" -> "openstack-cinder-scheduler-clone_start_0" [ style = dashed] -+"openstack-cinder-api-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-cinder-api-clone_start_0" -> "openstack-cinder-api-clone_running_0" [ style = dashed] -+"openstack-cinder-api-clone_start_0" -> "openstack-cinder-api:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-cinder-api-clone_start_0" -> "openstack-cinder-api:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-cinder-api-clone_start_0" -> "openstack-cinder-api_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-cinder-api-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-cinder-api:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-api:1_start_0 overcloud-controller-0" -> "openstack-cinder-api-clone_running_0" [ style = dashed] -+"openstack-cinder-api:1_start_0 overcloud-controller-0" -> "openstack-cinder-api:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-cinder-api:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-api:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-api:2_start_0 overcloud-controller-1" -> "openstack-cinder-api-clone_running_0" [ style = dashed] -+"openstack-cinder-api:2_start_0 overcloud-controller-1" -> "openstack-cinder-api:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-cinder-api:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-api_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-api_start_0 overcloud-controller-2" -> "openstack-cinder-api-clone_running_0" [ style = dashed] -+"openstack-cinder-api_start_0 overcloud-controller-2" -> "openstack-cinder-api_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-cinder-api_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-scheduler-clone_running_0" -> "openstack-cinder-volume_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-cinder-scheduler-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-cinder-scheduler-clone_start_0" -> "openstack-cinder-scheduler-clone_running_0" [ style = dashed] -+"openstack-cinder-scheduler-clone_start_0" -> "openstack-cinder-scheduler:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-cinder-scheduler-clone_start_0" -> "openstack-cinder-scheduler:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-cinder-scheduler-clone_start_0" -> "openstack-cinder-scheduler_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-cinder-scheduler-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-cinder-scheduler:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-scheduler:1_start_0 overcloud-controller-0" -> "openstack-cinder-scheduler-clone_running_0" [ style = dashed] 
-+"openstack-cinder-scheduler:1_start_0 overcloud-controller-0" -> "openstack-cinder-scheduler:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-cinder-scheduler:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-scheduler:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-scheduler:2_start_0 overcloud-controller-1" -> "openstack-cinder-scheduler-clone_running_0" [ style = dashed] -+"openstack-cinder-scheduler:2_start_0 overcloud-controller-1" -> "openstack-cinder-scheduler:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-cinder-scheduler:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-scheduler_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-scheduler_start_0 overcloud-controller-2" -> "openstack-cinder-scheduler-clone_running_0" [ style = dashed] -+"openstack-cinder-scheduler_start_0 overcloud-controller-2" -> "openstack-cinder-scheduler_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-cinder-scheduler_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-volume_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-cinder-volume_start_0 overcloud-controller-2" -> "openstack-cinder-volume_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-cinder-volume_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-api-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-glance-api-clone_start_0" -> "openstack-glance-api-clone_running_0" [ style = dashed] -+"openstack-glance-api-clone_start_0" -> "openstack-glance-api:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-glance-api-clone_start_0" -> "openstack-glance-api:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-glance-api-clone_start_0" -> "openstack-glance-api_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-glance-api-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-glance-api:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-api:1_start_0 overcloud-controller-1" -> "openstack-glance-api-clone_running_0" [ style = dashed] -+"openstack-glance-api:1_start_0 overcloud-controller-1" -> "openstack-glance-api:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-glance-api:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-api:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-api:2_start_0 overcloud-controller-2" -> "openstack-glance-api-clone_running_0" [ style = dashed] -+"openstack-glance-api:2_start_0 overcloud-controller-2" -> "openstack-glance-api:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-glance-api:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-api_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-api_start_0 overcloud-controller-0" -> "openstack-glance-api-clone_running_0" [ style = dashed] -+"openstack-glance-api_start_0 overcloud-controller-0" -> "openstack-glance-api_monitor_60000 overcloud-controller-0" [ style = 
dashed] -+"openstack-glance-api_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-registry-clone_running_0" -> "openstack-glance-api-clone_start_0" [ style = dashed] -+"openstack-glance-registry-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-glance-registry-clone_start_0" -> "openstack-glance-registry-clone_running_0" [ style = dashed] -+"openstack-glance-registry-clone_start_0" -> "openstack-glance-registry:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-glance-registry-clone_start_0" -> "openstack-glance-registry:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-glance-registry-clone_start_0" -> "openstack-glance-registry_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-glance-registry-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-glance-registry:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-registry:1_start_0 overcloud-controller-1" -> "openstack-glance-registry-clone_running_0" [ style = dashed] -+"openstack-glance-registry:1_start_0 overcloud-controller-1" -> "openstack-glance-registry:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-glance-registry:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-registry:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-registry:2_start_0 overcloud-controller-2" -> "openstack-glance-registry-clone_running_0" [ style = dashed] -+"openstack-glance-registry:2_start_0 overcloud-controller-2" -> "openstack-glance-registry:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-glance-registry:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-registry_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-glance-registry_start_0 overcloud-controller-0" -> "openstack-glance-registry-clone_running_0" [ style = dashed] -+"openstack-glance-registry_start_0 overcloud-controller-0" -> "openstack-glance-registry_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-glance-registry_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cfn-clone_running_0" -> "openstack-heat-api-cloudwatch-clone_start_0" [ style = dashed] -+"openstack-heat-api-cfn-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-api-cfn-clone_start_0" -> "openstack-heat-api-cfn-clone_running_0" [ style = dashed] -+"openstack-heat-api-cfn-clone_start_0" -> "openstack-heat-api-cfn:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api-cfn-clone_start_0" -> "openstack-heat-api-cfn:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api-cfn-clone_start_0" -> "openstack-heat-api-cfn_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api-cfn-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-api-cfn:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cfn:1_start_0 overcloud-controller-1" -> "openstack-heat-api-cfn-clone_running_0" [ style = dashed] -+"openstack-heat-api-cfn:1_start_0 overcloud-controller-1" -> "openstack-heat-api-cfn:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api-cfn:1_start_0 
overcloud-controller-1" -> "openstack-heat-api-cloudwatch:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api-cfn:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cfn:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cfn:2_start_0 overcloud-controller-2" -> "openstack-heat-api-cfn-clone_running_0" [ style = dashed] -+"openstack-heat-api-cfn:2_start_0 overcloud-controller-2" -> "openstack-heat-api-cfn:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api-cfn:2_start_0 overcloud-controller-2" -> "openstack-heat-api-cloudwatch:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api-cfn:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cfn_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cfn_start_0 overcloud-controller-0" -> "openstack-heat-api-cfn-clone_running_0" [ style = dashed] -+"openstack-heat-api-cfn_start_0 overcloud-controller-0" -> "openstack-heat-api-cfn_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api-cfn_start_0 overcloud-controller-0" -> "openstack-heat-api-cloudwatch_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api-cfn_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-clone_running_0" -> "openstack-heat-api-cfn-clone_start_0" [ style = dashed] -+"openstack-heat-api-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-api-clone_start_0" -> "openstack-heat-api-clone_running_0" [ style = dashed] -+"openstack-heat-api-clone_start_0" -> "openstack-heat-api:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api-clone_start_0" -> "openstack-heat-api:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api-clone_start_0" -> "openstack-heat-api_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-api-cloudwatch-clone_running_0" -> "openstack-heat-engine-clone_start_0" [ style = dashed] -+"openstack-heat-api-cloudwatch-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-api-cloudwatch-clone_start_0" -> "openstack-heat-api-cloudwatch-clone_running_0" [ style = dashed] -+"openstack-heat-api-cloudwatch-clone_start_0" -> "openstack-heat-api-cloudwatch:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api-cloudwatch-clone_start_0" -> "openstack-heat-api-cloudwatch:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api-cloudwatch-clone_start_0" -> "openstack-heat-api-cloudwatch_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api-cloudwatch-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-api-cloudwatch:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cloudwatch:1_start_0 overcloud-controller-1" -> "openstack-heat-api-cloudwatch-clone_running_0" [ style = dashed] -+"openstack-heat-api-cloudwatch:1_start_0 overcloud-controller-1" -> "openstack-heat-api-cloudwatch:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api-cloudwatch:1_start_0 overcloud-controller-1" -> "openstack-heat-engine:1_start_0 overcloud-controller-1" [ style = dashed] 
-+"openstack-heat-api-cloudwatch:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cloudwatch:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cloudwatch:2_start_0 overcloud-controller-2" -> "openstack-heat-api-cloudwatch-clone_running_0" [ style = dashed] -+"openstack-heat-api-cloudwatch:2_start_0 overcloud-controller-2" -> "openstack-heat-api-cloudwatch:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api-cloudwatch:2_start_0 overcloud-controller-2" -> "openstack-heat-engine:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api-cloudwatch:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cloudwatch_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api-cloudwatch_start_0 overcloud-controller-0" -> "openstack-heat-api-cloudwatch-clone_running_0" [ style = dashed] -+"openstack-heat-api-cloudwatch_start_0 overcloud-controller-0" -> "openstack-heat-api-cloudwatch_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api-cloudwatch_start_0 overcloud-controller-0" -> "openstack-heat-engine_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api-cloudwatch_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api:1_start_0 overcloud-controller-1" -> "openstack-heat-api-cfn:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api:1_start_0 overcloud-controller-1" -> "openstack-heat-api-clone_running_0" [ style = dashed] -+"openstack-heat-api:1_start_0 overcloud-controller-1" -> "openstack-heat-api:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-heat-api:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api:2_start_0 overcloud-controller-2" -> "openstack-heat-api-cfn:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api:2_start_0 overcloud-controller-2" -> "openstack-heat-api-clone_running_0" [ style = dashed] -+"openstack-heat-api:2_start_0 overcloud-controller-2" -> "openstack-heat-api:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-heat-api:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-api_start_0 overcloud-controller-0" -> "openstack-heat-api-cfn_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api_start_0 overcloud-controller-0" -> "openstack-heat-api-clone_running_0" [ style = dashed] -+"openstack-heat-api_start_0 overcloud-controller-0" -> "openstack-heat-api_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-heat-api_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-engine-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-engine-clone_start_0" -> "openstack-heat-engine-clone_running_0" [ style = dashed] -+"openstack-heat-engine-clone_start_0" -> "openstack-heat-engine:1_start_0 overcloud-controller-1" [ style = dashed] 
-+"openstack-heat-engine-clone_start_0" -> "openstack-heat-engine:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-heat-engine-clone_start_0" -> "openstack-heat-engine_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-heat-engine-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-heat-engine:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-engine:1_start_0 overcloud-controller-1" -> "openstack-heat-engine-clone_running_0" [ style = dashed] -+"openstack-heat-engine:1_start_0 overcloud-controller-1" -> "openstack-heat-engine:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-heat-engine:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-engine:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-engine:2_start_0 overcloud-controller-2" -> "openstack-heat-engine-clone_running_0" [ style = dashed] -+"openstack-heat-engine:2_start_0 overcloud-controller-2" -> "openstack-heat-engine:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-heat-engine:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-engine_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-heat-engine_start_0 overcloud-controller-0" -> "openstack-heat-engine-clone_running_0" [ style = dashed] -+"openstack-heat-engine_start_0 overcloud-controller-0" -> "openstack-heat-engine_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-heat-engine_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-api-clone_running_0" -> "openstack-nova-scheduler-clone_start_0" [ style = dashed] -+"openstack-nova-api-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-api-clone_start_0" -> "openstack-nova-api-clone_running_0" [ style = dashed] -+"openstack-nova-api-clone_start_0" -> "openstack-nova-api:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-api-clone_start_0" -> "openstack-nova-api:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-api-clone_start_0" -> "openstack-nova-api_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-api-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-api:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-api:1_start_0 overcloud-controller-0" -> "openstack-nova-api-clone_running_0" [ style = dashed] -+"openstack-nova-api:1_start_0 overcloud-controller-0" -> "openstack-nova-api:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-nova-api:1_start_0 overcloud-controller-0" -> "openstack-nova-scheduler:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-api:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-api:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-api:2_start_0 overcloud-controller-1" -> "openstack-nova-api-clone_running_0" [ style = dashed] -+"openstack-nova-api:2_start_0 overcloud-controller-1" -> "openstack-nova-api:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-nova-api:2_start_0 overcloud-controller-1" -> "openstack-nova-scheduler:2_start_0 overcloud-controller-1" [ style = dashed] 
-+"openstack-nova-api:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-api_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-api_start_0 overcloud-controller-2" -> "openstack-nova-api-clone_running_0" [ style = dashed] -+"openstack-nova-api_start_0 overcloud-controller-2" -> "openstack-nova-api_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-nova-api_start_0 overcloud-controller-2" -> "openstack-nova-scheduler_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-api_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-conductor-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-conductor-clone_start_0" -> "openstack-nova-conductor-clone_running_0" [ style = dashed] -+"openstack-nova-conductor-clone_start_0" -> "openstack-nova-conductor:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-conductor-clone_start_0" -> "openstack-nova-conductor:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-conductor-clone_start_0" -> "openstack-nova-conductor_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-conductor-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-conductor:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-conductor:1_start_0 overcloud-controller-1" -> "openstack-nova-conductor-clone_running_0" [ style = dashed] -+"openstack-nova-conductor:1_start_0 overcloud-controller-1" -> "openstack-nova-conductor:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-nova-conductor:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-conductor:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-conductor:2_start_0 overcloud-controller-2" -> "openstack-nova-conductor-clone_running_0" [ style = dashed] -+"openstack-nova-conductor:2_start_0 overcloud-controller-2" -> "openstack-nova-conductor:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-nova-conductor:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-conductor_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-conductor_start_0 overcloud-controller-0" -> "openstack-nova-conductor-clone_running_0" [ style = dashed] -+"openstack-nova-conductor_start_0 overcloud-controller-0" -> "openstack-nova-conductor_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-nova-conductor_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-consoleauth-clone_running_0" -> "openstack-nova-novncproxy-clone_start_0" [ style = dashed] -+"openstack-nova-consoleauth-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-consoleauth-clone_start_0" -> "openstack-nova-consoleauth-clone_running_0" [ style = dashed] -+"openstack-nova-consoleauth-clone_start_0" -> "openstack-nova-consoleauth:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-consoleauth-clone_start_0" -> "openstack-nova-consoleauth:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-consoleauth-clone_start_0" -> "openstack-nova-consoleauth_start_0 overcloud-controller-2" [ style = dashed] 
-+"openstack-nova-consoleauth-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-consoleauth:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-consoleauth:1_start_0 overcloud-controller-0" -> "openstack-nova-consoleauth-clone_running_0" [ style = dashed] -+"openstack-nova-consoleauth:1_start_0 overcloud-controller-0" -> "openstack-nova-consoleauth:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-nova-consoleauth:1_start_0 overcloud-controller-0" -> "openstack-nova-novncproxy:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-consoleauth:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-consoleauth:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-consoleauth:2_start_0 overcloud-controller-1" -> "openstack-nova-consoleauth-clone_running_0" [ style = dashed] -+"openstack-nova-consoleauth:2_start_0 overcloud-controller-1" -> "openstack-nova-consoleauth:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-nova-consoleauth:2_start_0 overcloud-controller-1" -> "openstack-nova-novncproxy:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-consoleauth:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-consoleauth_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-consoleauth_start_0 overcloud-controller-2" -> "openstack-nova-consoleauth-clone_running_0" [ style = dashed] -+"openstack-nova-consoleauth_start_0 overcloud-controller-2" -> "openstack-nova-consoleauth_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-nova-consoleauth_start_0 overcloud-controller-2" -> "openstack-nova-novncproxy_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-consoleauth_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-novncproxy-clone_running_0" -> "openstack-nova-api-clone_start_0" [ style = dashed] -+"openstack-nova-novncproxy-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-novncproxy-clone_start_0" -> "openstack-nova-novncproxy-clone_running_0" [ style = dashed] -+"openstack-nova-novncproxy-clone_start_0" -> "openstack-nova-novncproxy:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-novncproxy-clone_start_0" -> "openstack-nova-novncproxy:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-novncproxy-clone_start_0" -> "openstack-nova-novncproxy_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-novncproxy-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-novncproxy:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-novncproxy:1_start_0 overcloud-controller-0" -> "openstack-nova-api:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-novncproxy:1_start_0 overcloud-controller-0" -> "openstack-nova-novncproxy-clone_running_0" [ style = dashed] -+"openstack-nova-novncproxy:1_start_0 overcloud-controller-0" -> "openstack-nova-novncproxy:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-nova-novncproxy:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-novncproxy:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" 
fontcolor="black"] -+"openstack-nova-novncproxy:2_start_0 overcloud-controller-1" -> "openstack-nova-api:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-novncproxy:2_start_0 overcloud-controller-1" -> "openstack-nova-novncproxy-clone_running_0" [ style = dashed] -+"openstack-nova-novncproxy:2_start_0 overcloud-controller-1" -> "openstack-nova-novncproxy:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-nova-novncproxy:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-novncproxy_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-novncproxy_start_0 overcloud-controller-2" -> "openstack-nova-api_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-novncproxy_start_0 overcloud-controller-2" -> "openstack-nova-novncproxy-clone_running_0" [ style = dashed] -+"openstack-nova-novncproxy_start_0 overcloud-controller-2" -> "openstack-nova-novncproxy_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-nova-novncproxy_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-scheduler-clone_running_0" -> "openstack-nova-conductor-clone_start_0" [ style = dashed] -+"openstack-nova-scheduler-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-scheduler-clone_start_0" -> "openstack-nova-scheduler-clone_running_0" [ style = dashed] -+"openstack-nova-scheduler-clone_start_0" -> "openstack-nova-scheduler:1_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-scheduler-clone_start_0" -> "openstack-nova-scheduler:2_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-scheduler-clone_start_0" -> "openstack-nova-scheduler_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-scheduler-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-nova-scheduler:1_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-scheduler:1_start_0 overcloud-controller-0" -> "openstack-nova-conductor_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-nova-scheduler:1_start_0 overcloud-controller-0" -> "openstack-nova-scheduler-clone_running_0" [ style = dashed] -+"openstack-nova-scheduler:1_start_0 overcloud-controller-0" -> "openstack-nova-scheduler:1_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-nova-scheduler:1_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-scheduler:2_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-scheduler:2_start_0 overcloud-controller-1" -> "openstack-nova-conductor:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-nova-scheduler:2_start_0 overcloud-controller-1" -> "openstack-nova-scheduler-clone_running_0" [ style = dashed] -+"openstack-nova-scheduler:2_start_0 overcloud-controller-1" -> "openstack-nova-scheduler:2_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-nova-scheduler:2_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-scheduler_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-nova-scheduler_start_0 overcloud-controller-2" -> "openstack-nova-conductor:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-nova-scheduler_start_0 overcloud-controller-2" -> 
"openstack-nova-scheduler-clone_running_0" [ style = dashed] -+"openstack-nova-scheduler_start_0 overcloud-controller-2" -> "openstack-nova-scheduler_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-nova-scheduler_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-sahara-api-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-sahara-api-clone_start_0" -> "openstack-sahara-api-clone_running_0" [ style = dashed] -+"openstack-sahara-api-clone_start_0" -> "openstack-sahara-api:1_start_0 overcloud-controller-1" [ style = dashed] -+"openstack-sahara-api-clone_start_0" -> "openstack-sahara-api:2_start_0 overcloud-controller-2" [ style = dashed] -+"openstack-sahara-api-clone_start_0" -> "openstack-sahara-api_start_0 overcloud-controller-0" [ style = dashed] -+"openstack-sahara-api-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"openstack-sahara-api:1_monitor_60000 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-sahara-api:1_start_0 overcloud-controller-1" -> "openstack-sahara-api-clone_running_0" [ style = dashed] -+"openstack-sahara-api:1_start_0 overcloud-controller-1" -> "openstack-sahara-api:1_monitor_60000 overcloud-controller-1" [ style = dashed] -+"openstack-sahara-api:1_start_0 overcloud-controller-1" [ style=dashed color="red" fontcolor="black"] -+"openstack-sahara-api:2_monitor_60000 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-sahara-api:2_start_0 overcloud-controller-2" -> "openstack-sahara-api-clone_running_0" [ style = dashed] -+"openstack-sahara-api:2_start_0 overcloud-controller-2" -> "openstack-sahara-api:2_monitor_60000 overcloud-controller-2" [ style = dashed] -+"openstack-sahara-api:2_start_0 overcloud-controller-2" [ style=dashed color="red" fontcolor="black"] -+"openstack-sahara-api_monitor_60000 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+"openstack-sahara-api_start_0 overcloud-controller-0" -> "openstack-sahara-api-clone_running_0" [ style = dashed] -+"openstack-sahara-api_start_0 overcloud-controller-0" -> "openstack-sahara-api_monitor_60000 overcloud-controller-0" [ style = dashed] -+"openstack-sahara-api_start_0 overcloud-controller-0" [ style=dashed color="red" fontcolor="black"] -+} -diff --git a/pengine/test10/unrunnable-2.exp b/pengine/test10/unrunnable-2.exp -new file mode 100644 -index 0000000..56e315f ---- /dev/null -+++ b/pengine/test10/unrunnable-2.exp -@@ -0,0 +1 @@ -+ -diff --git a/pengine/test10/unrunnable-2.scores b/pengine/test10/unrunnable-2.scores -new file mode 100644 -index 0000000..6ec65c9 ---- /dev/null -+++ b/pengine/test10/unrunnable-2.scores -@@ -0,0 +1,814 @@ -+Allocation scores: -+clone_color: delay-clone allocation score on overcloud-controller-0: 0 -+clone_color: delay-clone allocation score on overcloud-controller-1: 0 -+clone_color: delay-clone allocation score on overcloud-controller-2: 0 -+clone_color: delay:0 allocation score on overcloud-controller-0: 0 -+clone_color: delay:0 allocation score on overcloud-controller-1: 0 -+clone_color: delay:0 allocation score on overcloud-controller-2: 0 -+clone_color: delay:1 allocation score on overcloud-controller-0: 0 -+clone_color: delay:1 allocation score on overcloud-controller-1: 0 -+clone_color: delay:1 allocation score on overcloud-controller-2: 0 -+clone_color: delay:2 allocation score on overcloud-controller-0: 0 -+clone_color: delay:2 allocation score on overcloud-controller-1: 0 
-+clone_color: delay:2 allocation score on overcloud-controller-2: 0
-+clone_color: galera-master allocation score on overcloud-controller-0: 0
-+clone_color: galera-master allocation score on overcloud-controller-1: 0
-+clone_color: galera-master allocation score on overcloud-controller-2: 0
-+clone_color: galera:0 allocation score on overcloud-controller-0: 0
-+clone_color: galera:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: galera:0 allocation score on overcloud-controller-2: 0
-+clone_color: galera:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: galera:1 allocation score on overcloud-controller-1: 0
-+clone_color: galera:1 allocation score on overcloud-controller-2: 0
-+clone_color: galera:2 allocation score on overcloud-controller-0: 0
-+clone_color: galera:2 allocation score on overcloud-controller-1: 0
-+clone_color: galera:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: haproxy-clone allocation score on overcloud-controller-0: INFINITY
-+clone_color: haproxy-clone allocation score on overcloud-controller-1: INFINITY
-+clone_color: haproxy-clone allocation score on overcloud-controller-2: 0
-+clone_color: haproxy:0 allocation score on overcloud-controller-0: 0
-+clone_color: haproxy:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: haproxy:0 allocation score on overcloud-controller-2: 0
-+clone_color: haproxy:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: haproxy:1 allocation score on overcloud-controller-1: 0
-+clone_color: haproxy:1 allocation score on overcloud-controller-2: 0
-+clone_color: haproxy:2 allocation score on overcloud-controller-0: 0
-+clone_color: haproxy:2 allocation score on overcloud-controller-1: 0
-+clone_color: haproxy:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: httpd-clone allocation score on overcloud-controller-0: 0
-+clone_color: httpd-clone allocation score on overcloud-controller-1: 0
-+clone_color: httpd-clone allocation score on overcloud-controller-2: 0
-+clone_color: httpd:0 allocation score on overcloud-controller-0: 0
-+clone_color: httpd:0 allocation score on overcloud-controller-1: 0
-+clone_color: httpd:0 allocation score on overcloud-controller-2: 0
-+clone_color: httpd:1 allocation score on overcloud-controller-0: 0
-+clone_color: httpd:1 allocation score on overcloud-controller-1: 0
-+clone_color: httpd:1 allocation score on overcloud-controller-2: 0
-+clone_color: httpd:2 allocation score on overcloud-controller-0: 0
-+clone_color: httpd:2 allocation score on overcloud-controller-1: 0
-+clone_color: httpd:2 allocation score on overcloud-controller-2: 0
-+clone_color: memcached-clone allocation score on overcloud-controller-0: 0
-+clone_color: memcached-clone allocation score on overcloud-controller-1: 0
-+clone_color: memcached-clone allocation score on overcloud-controller-2: 0
-+clone_color: memcached:0 allocation score on overcloud-controller-0: 0
-+clone_color: memcached:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: memcached:0 allocation score on overcloud-controller-2: 0
-+clone_color: memcached:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: memcached:1 allocation score on overcloud-controller-1: 0
-+clone_color: memcached:1 allocation score on overcloud-controller-2: 0
-+clone_color: memcached:2 allocation score on overcloud-controller-0: 0
-+clone_color: memcached:2 allocation score on overcloud-controller-1: 0
-+clone_color: memcached:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: mongod-clone allocation score on overcloud-controller-0: 0
-+clone_color: mongod-clone allocation score on overcloud-controller-1: 0
-+clone_color: mongod-clone allocation score on overcloud-controller-2: 0
-+clone_color: mongod:0 allocation score on overcloud-controller-0: 0
-+clone_color: mongod:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: mongod:0 allocation score on overcloud-controller-2: 0
-+clone_color: mongod:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: mongod:1 allocation score on overcloud-controller-1: 0
-+clone_color: mongod:1 allocation score on overcloud-controller-2: 0
-+clone_color: mongod:2 allocation score on overcloud-controller-0: 0
-+clone_color: mongod:2 allocation score on overcloud-controller-1: 0
-+clone_color: mongod:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: neutron-dhcp-agent-clone allocation score on overcloud-controller-0: 0
-+clone_color: neutron-dhcp-agent-clone allocation score on overcloud-controller-1: 0
-+clone_color: neutron-dhcp-agent-clone allocation score on overcloud-controller-2: 0
-+clone_color: neutron-dhcp-agent:0 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-dhcp-agent:0 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-dhcp-agent:0 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-dhcp-agent:1 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-dhcp-agent:1 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-dhcp-agent:1 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-dhcp-agent:2 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-dhcp-agent:2 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-dhcp-agent:2 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-l3-agent-clone allocation score on overcloud-controller-0: 0
-+clone_color: neutron-l3-agent-clone allocation score on overcloud-controller-1: 0
-+clone_color: neutron-l3-agent-clone allocation score on overcloud-controller-2: 0
-+clone_color: neutron-l3-agent:0 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-l3-agent:0 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-l3-agent:0 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-l3-agent:1 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-l3-agent:1 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-l3-agent:1 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-l3-agent:2 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-l3-agent:2 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-l3-agent:2 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-metadata-agent-clone allocation score on overcloud-controller-0: 0
-+clone_color: neutron-metadata-agent-clone allocation score on overcloud-controller-1: 0
-+clone_color: neutron-metadata-agent-clone allocation score on overcloud-controller-2: 0
-+clone_color: neutron-metadata-agent:0 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-metadata-agent:0 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-metadata-agent:0 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-metadata-agent:1 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-metadata-agent:1 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-metadata-agent:1 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-metadata-agent:2 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-metadata-agent:2 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-metadata-agent:2 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-netns-cleanup-clone allocation score on overcloud-controller-0: 0
-+clone_color: neutron-netns-cleanup-clone allocation score on overcloud-controller-1: 0
-+clone_color: neutron-netns-cleanup-clone allocation score on overcloud-controller-2: 0
-+clone_color: neutron-netns-cleanup:0 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-netns-cleanup:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: neutron-netns-cleanup:0 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-netns-cleanup:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: neutron-netns-cleanup:1 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-netns-cleanup:1 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-netns-cleanup:2 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-netns-cleanup:2 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-netns-cleanup:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: neutron-openvswitch-agent-clone allocation score on overcloud-controller-0: 0
-+clone_color: neutron-openvswitch-agent-clone allocation score on overcloud-controller-1: 0
-+clone_color: neutron-openvswitch-agent-clone allocation score on overcloud-controller-2: 0
-+clone_color: neutron-openvswitch-agent:0 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-openvswitch-agent:0 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-openvswitch-agent:0 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-openvswitch-agent:1 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-openvswitch-agent:1 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-openvswitch-agent:1 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-openvswitch-agent:2 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-openvswitch-agent:2 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-openvswitch-agent:2 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-ovs-cleanup-clone allocation score on overcloud-controller-0: 0
-+clone_color: neutron-ovs-cleanup-clone allocation score on overcloud-controller-1: 0
-+clone_color: neutron-ovs-cleanup-clone allocation score on overcloud-controller-2: 0
-+clone_color: neutron-ovs-cleanup:0 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-ovs-cleanup:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: neutron-ovs-cleanup:0 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-ovs-cleanup:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: neutron-ovs-cleanup:1 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-ovs-cleanup:1 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-ovs-cleanup:2 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-ovs-cleanup:2 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-ovs-cleanup:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: neutron-server-clone allocation score on overcloud-controller-0: 0
-+clone_color: neutron-server-clone allocation score on overcloud-controller-1: 0
-+clone_color: neutron-server-clone allocation score on overcloud-controller-2: 0
-+clone_color: neutron-server:0 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-server:0 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-server:0 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-server:1 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-server:1 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-server:1 allocation score on overcloud-controller-2: 0
-+clone_color: neutron-server:2 allocation score on overcloud-controller-0: 0
-+clone_color: neutron-server:2 allocation score on overcloud-controller-1: 0
-+clone_color: neutron-server:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-evaluator-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-evaluator-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-evaluator-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-evaluator:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-evaluator:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-evaluator:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-evaluator:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-evaluator:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-evaluator:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-evaluator:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-evaluator:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-evaluator:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-listener-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-listener-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-listener-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-listener:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-listener:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: openstack-aodh-listener:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-listener:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: openstack-aodh-listener:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-listener:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-listener:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-listener:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-listener:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: openstack-aodh-notifier-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-notifier-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-notifier-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-notifier:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-notifier:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-notifier:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-notifier:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-notifier:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-notifier:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-aodh-notifier:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-aodh-notifier:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-aodh-notifier:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-api-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-api-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-api-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-api:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-api:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-api:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-api:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-api:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-api:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-api:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-api:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-api:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-central-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-central-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-central-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-central:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-central:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-central:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-central:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-central:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-central:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-central:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-central:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-central:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-collector-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-collector-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-collector-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-collector:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-collector:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-collector:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-collector:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-collector:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-collector:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-collector:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-collector:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-collector:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-notification-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-notification-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-notification-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-notification:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-notification:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: openstack-ceilometer-notification:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-notification:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: openstack-ceilometer-notification:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-notification:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-ceilometer-notification:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-ceilometer-notification:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-ceilometer-notification:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: openstack-cinder-api-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-api-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-api-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-cinder-api:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-api:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-api:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-cinder-api:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-api:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-api:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-cinder-api:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-api:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-api:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-cinder-scheduler-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-scheduler-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-scheduler-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-cinder-scheduler:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-scheduler:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-scheduler:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-cinder-scheduler:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-scheduler:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-scheduler:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-cinder-scheduler:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-cinder-scheduler:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-cinder-scheduler:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-core-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-core-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-core-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-core:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-core:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-core:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-core:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-core:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-core:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-core:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-core:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-core:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-api-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-api-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-api-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-api:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-api:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-api:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-api:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-api:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-api:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-api:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-api:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-api:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-registry-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-registry-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-registry-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-registry:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-registry:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-registry:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-registry:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-registry:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-registry:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-glance-registry:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-glance-registry:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-glance-registry:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cfn-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cfn-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cfn-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cfn:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cfn:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cfn:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cfn:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cfn:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cfn:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cfn:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cfn:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cfn:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cloudwatch-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cloudwatch-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cloudwatch-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cloudwatch:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cloudwatch:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cloudwatch:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cloudwatch:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cloudwatch:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cloudwatch:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api-cloudwatch:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api-cloudwatch:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api-cloudwatch:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-api:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-api:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-api:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-engine-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-engine-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-engine-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-engine:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-engine:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-engine:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-engine:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-engine:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-engine:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-heat-engine:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-heat-engine:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-heat-engine:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-api-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-api-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-api-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-api:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-api:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-api:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-api:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-api:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-api:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-api:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-api:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-api:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-conductor-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-conductor-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-conductor-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-conductor:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-conductor:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-conductor:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-conductor:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-conductor:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-conductor:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-conductor:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-conductor:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-conductor:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-consoleauth-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-consoleauth-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-consoleauth-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-consoleauth:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-consoleauth:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-consoleauth:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-consoleauth:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-consoleauth:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-consoleauth:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-consoleauth:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-consoleauth:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-consoleauth:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-novncproxy-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-novncproxy-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-novncproxy-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-novncproxy:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-novncproxy:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-novncproxy:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-novncproxy:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-novncproxy:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-novncproxy:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-novncproxy:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-novncproxy:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-novncproxy:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-scheduler-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-scheduler-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-scheduler-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-scheduler:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-scheduler:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-scheduler:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-scheduler:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-scheduler:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-scheduler:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-nova-scheduler:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-nova-scheduler:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-nova-scheduler:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-api-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-sahara-api-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-sahara-api-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-api:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-sahara-api:0 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-sahara-api:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-api:1 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-sahara-api:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-sahara-api:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-api:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-sahara-api:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-sahara-api:2 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-engine-clone allocation score on overcloud-controller-0: 0
-+clone_color: openstack-sahara-engine-clone allocation score on overcloud-controller-1: 0
-+clone_color: openstack-sahara-engine-clone allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-engine:0 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-sahara-engine:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: openstack-sahara-engine:0 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-engine:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: openstack-sahara-engine:1 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-sahara-engine:1 allocation score on overcloud-controller-2: 0
-+clone_color: openstack-sahara-engine:2 allocation score on overcloud-controller-0: 0
-+clone_color: openstack-sahara-engine:2 allocation score on overcloud-controller-1: 0
-+clone_color: openstack-sahara-engine:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: rabbitmq-clone allocation score on overcloud-controller-0: 0
-+clone_color: rabbitmq-clone allocation score on overcloud-controller-1: 0
-+clone_color: rabbitmq-clone allocation score on overcloud-controller-2: 0
-+clone_color: rabbitmq:0 allocation score on overcloud-controller-0: 0
-+clone_color: rabbitmq:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: rabbitmq:0 allocation score on overcloud-controller-2: 0
-+clone_color: rabbitmq:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: rabbitmq:1 allocation score on overcloud-controller-1: 0
-+clone_color: rabbitmq:1 allocation score on overcloud-controller-2: 0
-+clone_color: rabbitmq:2 allocation score on overcloud-controller-0: 0
-+clone_color: rabbitmq:2 allocation score on overcloud-controller-1: 0
-+clone_color: rabbitmq:2 allocation score on overcloud-controller-2: INFINITY
-+clone_color: redis-master allocation score on overcloud-controller-0: 0
-+clone_color: redis-master allocation score on overcloud-controller-1: 0
-+clone_color: redis-master allocation score on overcloud-controller-2: 0
-+clone_color: redis:0 allocation score on overcloud-controller-0: 0
-+clone_color: redis:0 allocation score on overcloud-controller-1: INFINITY
-+clone_color: redis:0 allocation score on overcloud-controller-2: 0
-+clone_color: redis:1 allocation score on overcloud-controller-0: INFINITY
-+clone_color: redis:1 allocation score on overcloud-controller-1: 0
-+clone_color: redis:1 allocation score on overcloud-controller-2: 0
-+clone_color: redis:2 allocation score on overcloud-controller-0: 0
-+clone_color: redis:2 allocation score on overcloud-controller-1: 0
-+clone_color: redis:2 allocation score on overcloud-controller-2: INFINITY
-+galera:0 promotion score on overcloud-controller-1: 100
-+galera:1 promotion score on overcloud-controller-0: 100
-+galera:2 promotion score on overcloud-controller-2: 100
-+native_color: delay:0 allocation score on overcloud-controller-0: 0
-+native_color: delay:0 allocation score on overcloud-controller-1: 0
-+native_color: delay:0 allocation score on overcloud-controller-2: 0
-+native_color: delay:1 allocation score on overcloud-controller-0: 0
-+native_color: delay:1 allocation score on overcloud-controller-1: 0
-+native_color: delay:1 allocation score on overcloud-controller-2: -INFINITY
-+native_color: delay:2 allocation score on overcloud-controller-0: -INFINITY
-+native_color: delay:2 allocation score on overcloud-controller-1: 0
-+native_color: delay:2 allocation score on overcloud-controller-2: -INFINITY
-+native_color: galera:0 allocation score on overcloud-controller-0: 0
-+native_color: galera:0 allocation score on overcloud-controller-1: INFINITY
-+native_color: galera:0 allocation score on overcloud-controller-2: 0
-+native_color: galera:1 allocation score on overcloud-controller-0: INFINITY
-+native_color: galera:1 allocation score on overcloud-controller-1: -INFINITY
-+native_color: galera:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: galera:2 allocation score on overcloud-controller-0: 0 -+native_color: galera:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: galera:2 allocation score on overcloud-controller-2: INFINITY -+native_color: haproxy:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: haproxy:0 allocation score on overcloud-controller-1: INFINITY -+native_color: haproxy:0 allocation score on overcloud-controller-2: 0 -+native_color: haproxy:1 allocation score on overcloud-controller-0: INFINITY -+native_color: haproxy:1 allocation score on overcloud-controller-1: 0 -+native_color: haproxy:1 allocation score on overcloud-controller-2: 0 -+native_color: haproxy:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: haproxy:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: haproxy:2 allocation score on overcloud-controller-2: INFINITY -+native_color: httpd:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: httpd:0 allocation score on overcloud-controller-1: -INFINITY -+native_color: httpd:0 allocation score on overcloud-controller-2: -INFINITY -+native_color: httpd:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: httpd:1 allocation score on overcloud-controller-1: -INFINITY -+native_color: httpd:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: httpd:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: httpd:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: httpd:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: ip-192.0.2.11 allocation score on overcloud-controller-0: 0 -+native_color: ip-192.0.2.11 allocation score on overcloud-controller-1: INFINITY -+native_color: ip-192.0.2.11 allocation score on overcloud-controller-2: 0 -+native_color: ip-192.0.2.12 allocation score on overcloud-controller-0: INFINITY -+native_color: ip-192.0.2.12 allocation score on overcloud-controller-1: 0 -+native_color: ip-192.0.2.12 allocation score on overcloud-controller-2: 0 -+native_color: memcached:0 allocation score on overcloud-controller-0: 0 -+native_color: memcached:0 allocation score on overcloud-controller-1: INFINITY -+native_color: memcached:0 allocation score on overcloud-controller-2: 0 -+native_color: memcached:1 allocation score on overcloud-controller-0: INFINITY -+native_color: memcached:1 allocation score on overcloud-controller-1: -INFINITY -+native_color: memcached:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: memcached:2 allocation score on overcloud-controller-0: 0 -+native_color: memcached:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: memcached:2 allocation score on overcloud-controller-2: INFINITY -+native_color: mongod:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: mongod:0 allocation score on overcloud-controller-1: INFINITY -+native_color: mongod:0 allocation score on overcloud-controller-2: -INFINITY -+native_color: mongod:1 allocation score on overcloud-controller-0: INFINITY -+native_color: mongod:1 allocation score on overcloud-controller-1: 0 -+native_color: mongod:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: mongod:2 allocation score on overcloud-controller-0: 0 -+native_color: mongod:2 allocation score on overcloud-controller-1: 0 -+native_color: mongod:2 allocation score on 
overcloud-controller-2: INFINITY -+native_color: neutron-dhcp-agent:0 allocation score on overcloud-controller-0: 0 -+native_color: neutron-dhcp-agent:0 allocation score on overcloud-controller-1: 0 -+native_color: neutron-dhcp-agent:0 allocation score on overcloud-controller-2: 0 -+native_color: neutron-dhcp-agent:1 allocation score on overcloud-controller-0: 0 -+native_color: neutron-dhcp-agent:1 allocation score on overcloud-controller-1: 0 -+native_color: neutron-dhcp-agent:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-dhcp-agent:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-dhcp-agent:2 allocation score on overcloud-controller-1: 0 -+native_color: neutron-dhcp-agent:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-l3-agent:0 allocation score on overcloud-controller-0: 0 -+native_color: neutron-l3-agent:0 allocation score on overcloud-controller-1: 0 -+native_color: neutron-l3-agent:0 allocation score on overcloud-controller-2: 0 -+native_color: neutron-l3-agent:1 allocation score on overcloud-controller-0: 0 -+native_color: neutron-l3-agent:1 allocation score on overcloud-controller-1: 0 -+native_color: neutron-l3-agent:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-l3-agent:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-l3-agent:2 allocation score on overcloud-controller-1: 0 -+native_color: neutron-l3-agent:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-metadata-agent:0 allocation score on overcloud-controller-0: 0 -+native_color: neutron-metadata-agent:0 allocation score on overcloud-controller-1: 0 -+native_color: neutron-metadata-agent:0 allocation score on overcloud-controller-2: 0 -+native_color: neutron-metadata-agent:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-metadata-agent:1 allocation score on overcloud-controller-1: 0 -+native_color: neutron-metadata-agent:1 allocation score on overcloud-controller-2: 0 -+native_color: neutron-metadata-agent:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-metadata-agent:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: neutron-metadata-agent:2 allocation score on overcloud-controller-2: 0 -+native_color: neutron-netns-cleanup:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-netns-cleanup:0 allocation score on overcloud-controller-1: INFINITY -+native_color: neutron-netns-cleanup:0 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-netns-cleanup:1 allocation score on overcloud-controller-0: INFINITY -+native_color: neutron-netns-cleanup:1 allocation score on overcloud-controller-1: 0 -+native_color: neutron-netns-cleanup:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-netns-cleanup:2 allocation score on overcloud-controller-0: 0 -+native_color: neutron-netns-cleanup:2 allocation score on overcloud-controller-1: 0 -+native_color: neutron-netns-cleanup:2 allocation score on overcloud-controller-2: INFINITY -+native_color: neutron-openvswitch-agent:0 allocation score on overcloud-controller-0: 0 -+native_color: neutron-openvswitch-agent:0 allocation score on overcloud-controller-1: 0 -+native_color: neutron-openvswitch-agent:0 allocation score on overcloud-controller-2: 0 -+native_color: neutron-openvswitch-agent:1 allocation score on overcloud-controller-0: 0 
-+native_color: neutron-openvswitch-agent:1 allocation score on overcloud-controller-1: 0 -+native_color: neutron-openvswitch-agent:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-openvswitch-agent:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-openvswitch-agent:2 allocation score on overcloud-controller-1: 0 -+native_color: neutron-openvswitch-agent:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-ovs-cleanup:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-ovs-cleanup:0 allocation score on overcloud-controller-1: INFINITY -+native_color: neutron-ovs-cleanup:0 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-ovs-cleanup:1 allocation score on overcloud-controller-0: INFINITY -+native_color: neutron-ovs-cleanup:1 allocation score on overcloud-controller-1: 0 -+native_color: neutron-ovs-cleanup:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: neutron-ovs-cleanup:2 allocation score on overcloud-controller-0: 0 -+native_color: neutron-ovs-cleanup:2 allocation score on overcloud-controller-1: 0 -+native_color: neutron-ovs-cleanup:2 allocation score on overcloud-controller-2: INFINITY -+native_color: neutron-server:0 allocation score on overcloud-controller-0: 0 -+native_color: neutron-server:0 allocation score on overcloud-controller-1: 0 -+native_color: neutron-server:0 allocation score on overcloud-controller-2: 0 -+native_color: neutron-server:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-server:1 allocation score on overcloud-controller-1: 0 -+native_color: neutron-server:1 allocation score on overcloud-controller-2: 0 -+native_color: neutron-server:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: neutron-server:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: neutron-server:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-aodh-evaluator:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-aodh-evaluator:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-aodh-evaluator:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-aodh-evaluator:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-aodh-evaluator:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-aodh-evaluator:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-aodh-evaluator:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-aodh-evaluator:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-aodh-evaluator:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-aodh-listener:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-aodh-listener:0 allocation score on overcloud-controller-1: INFINITY -+native_color: openstack-aodh-listener:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-aodh-listener:1 allocation score on overcloud-controller-0: INFINITY -+native_color: openstack-aodh-listener:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-aodh-listener:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-aodh-listener:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-aodh-listener:2 allocation score on 
overcloud-controller-1: -INFINITY -+native_color: openstack-aodh-listener:2 allocation score on overcloud-controller-2: INFINITY -+native_color: openstack-aodh-notifier:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-aodh-notifier:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-aodh-notifier:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-aodh-notifier:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-aodh-notifier:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-aodh-notifier:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-aodh-notifier:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-aodh-notifier:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-aodh-notifier:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-api:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-ceilometer-api:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-api:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-api:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-ceilometer-api:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-api:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-ceilometer-api:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-ceilometer-api:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-api:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-ceilometer-central:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-ceilometer-central:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-central:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-central:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-ceilometer-central:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-central:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-central:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-ceilometer-central:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-ceilometer-central:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-collector:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-ceilometer-collector:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-collector:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-collector:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-ceilometer-collector:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-collector:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-ceilometer-collector:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-ceilometer-collector:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-collector:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: 
openstack-ceilometer-notification:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-ceilometer-notification:0 allocation score on overcloud-controller-1: INFINITY -+native_color: openstack-ceilometer-notification:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-notification:1 allocation score on overcloud-controller-0: INFINITY -+native_color: openstack-ceilometer-notification:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-ceilometer-notification:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-ceilometer-notification:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-ceilometer-notification:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-ceilometer-notification:2 allocation score on overcloud-controller-2: INFINITY -+native_color: openstack-cinder-api:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-cinder-api:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-cinder-api:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-cinder-api:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-cinder-api:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-cinder-api:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-cinder-api:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-cinder-api:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-cinder-api:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-cinder-scheduler:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-cinder-scheduler:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-cinder-scheduler:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-cinder-scheduler:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-cinder-scheduler:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-cinder-scheduler:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-cinder-scheduler:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-cinder-scheduler:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-cinder-scheduler:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-cinder-volume allocation score on overcloud-controller-0: 0 -+native_color: openstack-cinder-volume allocation score on overcloud-controller-1: 0 -+native_color: openstack-cinder-volume allocation score on overcloud-controller-2: 0 -+native_color: openstack-core:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-core:0 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-core:0 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-core:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-core:1 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-core:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-core:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-core:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-core:2 allocation score on 
overcloud-controller-2: -INFINITY -+native_color: openstack-glance-api:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-glance-api:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-glance-api:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-glance-api:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-glance-api:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-glance-api:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-glance-api:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-glance-api:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-glance-api:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-glance-registry:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-glance-registry:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-glance-registry:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-glance-registry:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-glance-registry:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-glance-registry:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-glance-registry:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-glance-registry:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-glance-registry:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api-cfn:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-heat-api-cfn:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-api-cfn:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api-cfn:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-api-cfn:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-api-cfn:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api-cfn:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-api-cfn:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-heat-api-cfn:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api-cloudwatch:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-heat-api-cloudwatch:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-api-cloudwatch:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api-cloudwatch:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-api-cloudwatch:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-api-cloudwatch:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api-cloudwatch:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-api-cloudwatch:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-heat-api-cloudwatch:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-heat-api:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-api:0 allocation score 
on overcloud-controller-2: 0 -+native_color: openstack-heat-api:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-api:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-api:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-api:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-api:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-heat-api:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-engine:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-heat-engine:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-engine:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-engine:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-engine:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-heat-engine:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-heat-engine:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-heat-engine:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-heat-engine:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-api:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-api:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-api:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-api:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-api:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-api:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-nova-api:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-nova-api:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-api:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-nova-conductor:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-conductor:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-conductor:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-conductor:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-nova-conductor:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-conductor:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-conductor:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-nova-conductor:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-nova-conductor:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-consoleauth:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-consoleauth:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-consoleauth:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-consoleauth:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-consoleauth:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-consoleauth:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-nova-consoleauth:2 allocation 
score on overcloud-controller-0: -INFINITY -+native_color: openstack-nova-consoleauth:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-consoleauth:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-nova-novncproxy:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-novncproxy:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-novncproxy:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-novncproxy:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-novncproxy:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-novncproxy:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-nova-novncproxy:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-nova-novncproxy:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-novncproxy:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-nova-scheduler:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-scheduler:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-scheduler:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-nova-scheduler:1 allocation score on overcloud-controller-0: 0 -+native_color: openstack-nova-scheduler:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-scheduler:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-nova-scheduler:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-nova-scheduler:2 allocation score on overcloud-controller-1: 0 -+native_color: openstack-nova-scheduler:2 allocation score on overcloud-controller-2: -INFINITY -+native_color: openstack-sahara-api:0 allocation score on overcloud-controller-0: 0 -+native_color: openstack-sahara-api:0 allocation score on overcloud-controller-1: 0 -+native_color: openstack-sahara-api:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-sahara-api:1 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-sahara-api:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-sahara-api:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-sahara-api:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-sahara-api:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-sahara-api:2 allocation score on overcloud-controller-2: 0 -+native_color: openstack-sahara-engine:0 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-sahara-engine:0 allocation score on overcloud-controller-1: INFINITY -+native_color: openstack-sahara-engine:0 allocation score on overcloud-controller-2: 0 -+native_color: openstack-sahara-engine:1 allocation score on overcloud-controller-0: INFINITY -+native_color: openstack-sahara-engine:1 allocation score on overcloud-controller-1: 0 -+native_color: openstack-sahara-engine:1 allocation score on overcloud-controller-2: 0 -+native_color: openstack-sahara-engine:2 allocation score on overcloud-controller-0: -INFINITY -+native_color: openstack-sahara-engine:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: openstack-sahara-engine:2 allocation score on overcloud-controller-2: INFINITY -+native_color: rabbitmq:0 
allocation score on overcloud-controller-0: 0 -+native_color: rabbitmq:0 allocation score on overcloud-controller-1: INFINITY -+native_color: rabbitmq:0 allocation score on overcloud-controller-2: 0 -+native_color: rabbitmq:1 allocation score on overcloud-controller-0: INFINITY -+native_color: rabbitmq:1 allocation score on overcloud-controller-1: -INFINITY -+native_color: rabbitmq:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: rabbitmq:2 allocation score on overcloud-controller-0: 0 -+native_color: rabbitmq:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: rabbitmq:2 allocation score on overcloud-controller-2: INFINITY -+native_color: redis:0 allocation score on overcloud-controller-0: 0 -+native_color: redis:0 allocation score on overcloud-controller-1: INFINITY -+native_color: redis:0 allocation score on overcloud-controller-2: 0 -+native_color: redis:1 allocation score on overcloud-controller-0: INFINITY -+native_color: redis:1 allocation score on overcloud-controller-1: -INFINITY -+native_color: redis:1 allocation score on overcloud-controller-2: -INFINITY -+native_color: redis:2 allocation score on overcloud-controller-0: 0 -+native_color: redis:2 allocation score on overcloud-controller-1: -INFINITY -+native_color: redis:2 allocation score on overcloud-controller-2: INFINITY -+redis:0 promotion score on overcloud-controller-1: 1 -+redis:1 promotion score on overcloud-controller-0: 1 -+redis:2 promotion score on overcloud-controller-2: 1 -diff --git a/pengine/test10/unrunnable-2.summary b/pengine/test10/unrunnable-2.summary -new file mode 100644 -index 0000000..0c0c765 ---- /dev/null -+++ b/pengine/test10/unrunnable-2.summary -@@ -0,0 +1,175 @@ -+6 of 117 resources DISABLED and 0 BLOCKED from being started due to failures -+ -+Current cluster status: -+Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ -+ ip-192.0.2.12 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 -+ Clone Set: haproxy-clone [haproxy] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Master/Slave Set: galera-master [galera] -+ Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: memcached-clone [memcached] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: rabbitmq-clone [rabbitmq] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-core-clone [openstack-core] -+ Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Master/Slave Set: redis-master [redis] -+ Masters: [ overcloud-controller-1 ] -+ Slaves: [ overcloud-controller-0 overcloud-controller-2 ] -+ ip-192.0.2.11 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 -+ Clone Set: mongod-clone [mongod] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-l3-agent-clone [neutron-l3-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] -+ Started: [ overcloud-controller-0 
overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped -+ Clone Set: openstack-heat-engine-clone [openstack-heat-engine] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-heat-api-clone [openstack-heat-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-glance-api-clone [openstack-glance-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-api-clone [openstack-nova-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-sahara-api-clone [openstack-sahara-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-glance-registry-clone [openstack-glance-registry] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-cinder-api-clone [openstack-cinder-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: delay-clone [delay] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-server-clone [neutron-server] 
-+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: httpd-clone [httpd] -+ Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ -+Transition Summary: -+ * Start openstack-cinder-volume (overcloud-controller-2 - blocked) -+ -+Executing cluster transition: -+ -+Revised cluster status: -+Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ -+ ip-192.0.2.12 (ocf::heartbeat:IPaddr2): Started overcloud-controller-0 -+ Clone Set: haproxy-clone [haproxy] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Master/Slave Set: galera-master [galera] -+ Masters: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: memcached-clone [memcached] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: rabbitmq-clone [rabbitmq] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-core-clone [openstack-core] -+ Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Master/Slave Set: redis-master [redis] -+ Masters: [ overcloud-controller-1 ] -+ Slaves: [ overcloud-controller-0 overcloud-controller-2 ] -+ ip-192.0.2.11 (ocf::heartbeat:IPaddr2): Started overcloud-controller-1 -+ Clone Set: mongod-clone [mongod] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-aodh-evaluator-clone [openstack-aodh-evaluator] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-scheduler-clone [openstack-nova-scheduler] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-l3-agent-clone [neutron-l3-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-netns-cleanup-clone [neutron-netns-cleanup] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-ovs-cleanup-clone [neutron-ovs-cleanup] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ openstack-cinder-volume (systemd:openstack-cinder-volume): Stopped -+ Clone Set: openstack-heat-engine-clone [openstack-heat-engine] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-api-clone [openstack-ceilometer-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-aodh-listener-clone [openstack-aodh-listener] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-metadata-agent-clone [neutron-metadata-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-aodh-notifier-clone [openstack-aodh-notifier] -+ Stopped: [ overcloud-controller-0 
overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-heat-api-clone [openstack-heat-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-collector-clone [openstack-ceilometer-collector] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-glance-api-clone [openstack-glance-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-cinder-scheduler-clone [openstack-cinder-scheduler] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-api-clone [openstack-nova-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-consoleauth-clone [openstack-nova-consoleauth] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-sahara-api-clone [openstack-sahara-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-heat-api-cloudwatch-clone [openstack-heat-api-cloudwatch] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-sahara-engine-clone [openstack-sahara-engine] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-glance-registry-clone [openstack-glance-registry] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-notification-clone [openstack-ceilometer-notification] -+ Started: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-cinder-api-clone [openstack-cinder-api] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-dhcp-agent-clone [neutron-dhcp-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-openvswitch-agent-clone [neutron-openvswitch-agent] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-novncproxy-clone [openstack-nova-novncproxy] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: delay-clone [delay] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: neutron-server-clone [neutron-server] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-ceilometer-central-clone [openstack-ceilometer-central] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: httpd-clone [httpd] -+ Stopped (disabled): [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-heat-api-cfn-clone [openstack-heat-api-cfn] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ Clone Set: openstack-nova-conductor-clone [openstack-nova-conductor] -+ Stopped: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ] -+ -diff --git a/pengine/test10/unrunnable-2.xml b/pengine/test10/unrunnable-2.xml -new file mode 100644 -index 0000000..cc780eb ---- /dev/null -+++ b/pengine/test10/unrunnable-2.xml -@@ -0,0 +1,1189 @@
[1,189 added lines of XML omitted: the unrunnable-2.xml test fixture's element markup was stripped during text extraction, leaving only bare "-+" diff prefixes, so nothing of the file content is recoverable here]
--- -1.8.3.1 -
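The INFINITY and -INFINITY values that dominate the allocation-score dump above follow Pacemaker's saturating score arithmetic: scores are plain integers, INFINITY is defined as 1000000, and when scores are combined a -INFINITY on either side always wins, so a hard ban on a node can never be out-voted by any preference. A minimal C sketch of that rule follows; the helper name add_scores and the exact clamping details are illustrative assumptions, not Pacemaker's actual library code.

#include <stdio.h>

#define SCORE_INFINITY 1000000      /* Pacemaker's notion of INFINITY */

/* Combine two scores with saturation (a sketch, not the real code). */
static int add_scores(int a, int b)
{
    if (a == -SCORE_INFINITY || b == -SCORE_INFINITY) {
        return -SCORE_INFINITY;     /* a hard ban is final */
    }
    if (a == SCORE_INFINITY || b == SCORE_INFINITY) {
        return SCORE_INFINITY;      /* a mandatory preference saturates */
    }
    if (a + b > SCORE_INFINITY) {
        return SCORE_INFINITY;      /* finite sums clamp at the rails */
    }
    if (a + b < -SCORE_INFINITY) {
        return -SCORE_INFINITY;
    }
    return a + b;                   /* ordinary scores simply add */
}

int main(void)
{
    printf("%d\n", add_scores(100, 42));                         /* 142 */
    printf("%d\n", add_scores(0, SCORE_INFINITY));               /* 1000000 */
    printf("%d\n", add_scores(SCORE_INFINITY, -SCORE_INFINITY)); /* -1000000 */
    return 0;
}

This is why the native_color lines above resolve the way they do: once any constraint contributes -INFINITY for a node, that clone instance can never be placed there, regardless of other positive scores.

diff --git a/SOURCES/012-invalid-config-loop.patch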
b/SOURCES/012-invalid-config-loop.patch new file mode 100644 index 0000000..395314e --- /dev/null +++ b/SOURCES/012-invalid-config-loop.patch @@ -0,0 +1,298 @@ +From 7e49054aad07a3d04311c183bc89dd159f75e7d8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 12 Aug 2016 13:52:15 +0200 +Subject: [PATCH 1/5] Refactor: xml.c: merge two identical branches + +--- + lib/common/xml.c | 7 +------ + 1 file changed, 1 insertion(+), 6 deletions(-) + +diff --git a/lib/common/xml.c b/lib/common/xml.c +index a20e73e..ff819a2 100644 +--- a/lib/common/xml.c ++++ b/lib/common/xml.c +@@ -5668,16 +5668,11 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + crm_trace("Stopping at %s", known_schemas[lpc].name); + break; + +- } else if (max > 0 && lpc == max) { ++ } else if (max > 0 && (lpc == max || next > max)) { + crm_trace("Upgrade limit reached at %s (lpc=%d, next=%d, max=%d)", + known_schemas[lpc].name, lpc, next, max); + break; + +- } else if (max > 0 && next > max) { +- crm_debug("Upgrade limit reached at %s (lpc=%d, next=%d, max=%d)", +- known_schemas[lpc].name, lpc, next, max); +- break; +- + } else if (known_schemas[lpc].transform == NULL) { + crm_debug("%s-style configuration is also valid for %s", + known_schemas[lpc].name, known_schemas[next].name); +-- +1.8.3.1 + + +From d83ffab1e6acb5049b8634ed20e156573307162a Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 12 Aug 2016 13:53:24 +0200 +Subject: [PATCH 2/5] Med: xml.c: internal self-or-less reference inflicted + infloop guard + +--- + lib/common/xml.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/lib/common/xml.c b/lib/common/xml.c +index ff819a2..4e019b5 100644 +--- a/lib/common/xml.c ++++ b/lib/common/xml.c +@@ -329,7 +329,7 @@ static void __xml_schema_add( + known_schemas[last].transform = strdup(transform); + } + if(after_transform == 0) { +- after_transform = xml_schema_max; ++ after_transform = xml_schema_max; /* upgrade is a one-way */ + } + known_schemas[last].after_transform = after_transform; + +@@ -5664,7 +5664,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + xmlNode *upgrade = NULL; + int next = known_schemas[lpc].after_transform; + +- if (next < 0) { ++ if (next < 0 || next <= lpc) { + crm_trace("Stopping at %s", known_schemas[lpc].name); + break; + +-- +1.8.3.1 + + +From 35fadfd89ee59edb999ed828b7e39e8449fd99b9 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 12 Aug 2016 14:13:51 +0200 +Subject: [PATCH 3/5] High: fix infloop when no schema validates on + update_validation + +Test case: +$ ./autogen.sh; ./configure; make # if not already +$ sed -i.orig 's|"promote"|"infloop"|' \ + pengine/test10/use-after-free-merge.xml +$ PCMK_schema_directory=$(pwd)/xml \ + tools/crm_simulate -x pengine/test10/use-after-free-merge.xml -S + +BEFORE: +Entity: line 54: element rsc_colocation: Relax-NG validity error : +Invalid attribute then-action for element rsc_order +Entity: line 49: element rsc_location: Relax-NG validity error : Element +constraints has extra content: rsc_location +[...infloop...] 
+ +AFTER: +Entity: line 54: element rsc_colocation: Relax-NG validity error : +Invalid attribute then-action for element rsc_order +Entity: line 49: element rsc_location: Relax-NG validity error : Element +constraints has extra content: rsc_location +[several times] + +--- + lib/common/xml.c | 26 +++++++++++++++++++------- + 1 file changed, 19 insertions(+), 7 deletions(-) + +diff --git a/lib/common/xml.c b/lib/common/xml.c +index 4e019b5..26d76f2 100644 +--- a/lib/common/xml.c ++++ b/lib/common/xml.c +@@ -5647,20 +5647,27 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + } + + while(lpc <= max_stable_schemas) { +- gboolean valid = TRUE; +- + crm_debug("Testing '%s' validation (%d of %d)", + known_schemas[lpc].name ? known_schemas[lpc].name : "", + lpc, max_stable_schemas); +- valid = validate_with(xml, lpc, to_logs); + +- if (valid) { +- *best = lpc; +- } else { ++ if (validate_with(xml, lpc, to_logs) == FALSE) { + crm_trace("%s validation failed", known_schemas[lpc].name ? known_schemas[lpc].name : ""); ++ if (*best) { ++ /* we've satisfied the validation, no need to check further */ ++ break; ++ } ++ rc = -pcmk_err_schema_validation; ++ ++ } else { ++ rc = pcmk_ok; + } + +- if (valid && transform) { ++ if (rc == pcmk_ok) { ++ *best = lpc; ++ } ++ ++ if (rc == pcmk_ok && transform) { + xmlNode *upgrade = NULL; + int next = known_schemas[lpc].after_transform; + +@@ -5716,6 +5723,11 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + } + } + } ++ ++ if (transform == FALSE || rc != pcmk_ok) { ++ /* we need some progress! */ ++ lpc++; ++ } + } + + if (*best > match) { +-- +1.8.3.1 + + +From e41267c3fd4958d61d6833385d86f354b10264b3 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 12 Aug 2016 14:35:47 +0200 +Subject: [PATCH 4/5] Refactor: xml.c: avoid repeated validate_with call on + happy path + +--- + lib/common/xml.c | 25 ++++++++++++++----------- + 1 file changed, 14 insertions(+), 11 deletions(-) + +diff --git a/lib/common/xml.c b/lib/common/xml.c +index 26d76f2..fc1fe7f 100644 +--- a/lib/common/xml.c ++++ b/lib/common/xml.c +@@ -5617,6 +5617,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + char *value = NULL; + int max_stable_schemas = xml_latest_schema_index(); + int lpc = 0, match = -1, rc = pcmk_ok; ++ int next = -1; /* -1 denotes "inactive" value */ + + CRM_CHECK(best != NULL, return -EINVAL); + CRM_CHECK(xml_blob != NULL, return -EINVAL); +@@ -5652,7 +5653,12 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + lpc, max_stable_schemas); + + if (validate_with(xml, lpc, to_logs) == FALSE) { +- crm_trace("%s validation failed", known_schemas[lpc].name ? known_schemas[lpc].name : ""); ++ if (next != -1) { ++ crm_info("Configuration not valid for schema: %s", known_schemas[lpc].name); ++ next = -1; ++ } else { ++ crm_trace("%s validation failed", known_schemas[lpc].name ? 
known_schemas[lpc].name : ""); ++ } + if (*best) { + /* we've satisfied the validation, no need to check further */ + break; +@@ -5660,6 +5666,10 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + rc = -pcmk_err_schema_validation; + + } else { ++ if (next != -1) { ++ crm_debug("Configuration valid for schema: %s", known_schemas[next].name); ++ next = -1; ++ } + rc = pcmk_ok; + } + +@@ -5669,7 +5679,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + + if (rc == pcmk_ok && transform) { + xmlNode *upgrade = NULL; +- int next = known_schemas[lpc].after_transform; ++ next = known_schemas[lpc].after_transform; + + if (next < 0 || next <= lpc) { + crm_trace("Stopping at %s", known_schemas[lpc].name); +@@ -5684,15 +5694,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + crm_debug("%s-style configuration is also valid for %s", + known_schemas[lpc].name, known_schemas[next].name); + +- if (validate_with(xml, next, to_logs)) { +- crm_debug("Configuration valid for schema: %s", known_schemas[next].name); +- lpc = next; +- *best = next; +- rc = pcmk_ok; +- +- } else { +- crm_info("Configuration not valid for schema: %s", known_schemas[next].name); +- } ++ lpc = next; + + } else { + crm_debug("Upgrading %s-style configuration to %s with %s", +@@ -5721,6 +5723,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g + free_xml(upgrade); + rc = -pcmk_err_schema_validation; + } ++ next = -1; + } + } + +-- +1.8.3.1 + + +From e4b9f340d54310251576c0f862a99149e846235b Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Jan=20Pokorn=C3=BD?= +Date: Fri, 12 Aug 2016 14:38:42 +0200 +Subject: [PATCH 5/5] Low: xml.c: better info in cli_config_update on no + validation success + +This commit turns [error message] from testcase at 35fadfd commit's +message from + +> Your current configuration could only be upgraded to pacemaker-1.0... +> the minimum requirement is pacemaker-2.0. + +into + +> Your current configuration could not validate with any schema in +> range [pacemaker-1.0, pacemaker-2.6] +--- + lib/common/xml.c | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +diff --git a/lib/common/xml.c b/lib/common/xml.c +index fc1fe7f..e4574fb 100644 +--- a/lib/common/xml.c ++++ b/lib/common/xml.c +@@ -5752,6 +5752,7 @@ cli_config_update(xmlNode ** xml, int *best_version, gboolean to_logs) + const char *value = crm_element_value(*xml, XML_ATTR_VALIDATION); + + int version = get_schema_version(value); ++ int orig_version = version; + int min_version = xml_minimum_schema_index(); + + if (version < min_version) { +@@ -5762,7 +5763,19 @@ cli_config_update(xmlNode ** xml, int *best_version, gboolean to_logs) + + value = crm_element_value(converted, XML_ATTR_VALIDATION); + if (version < min_version) { +- if (to_logs) { ++ if (version < orig_version) { ++ if (to_logs) { ++ crm_config_err("Your current configuration could not validate" ++ " with any schema in range [%s, %s]\n", ++ get_schema_name(orig_version), ++ xml_latest_schema()); ++ } else { ++ fprintf(stderr, "Your current configuration could not validate" ++ " with any schema in range [%s, %s]\n", ++ get_schema_name(orig_version), ++ xml_latest_schema()); ++ } ++ } else if (to_logs) { + crm_config_err("Your current configuration could only be upgraded to %s... 
" + "the minimum requirement is %s.\n", crm_str(value), + get_schema_name(min_version)); +-- +1.8.3.1 + diff --git a/SOURCES/013-clear-remote-history.patch b/SOURCES/013-clear-remote-history.patch new file mode 100644 index 0000000..915e132 --- /dev/null +++ b/SOURCES/013-clear-remote-history.patch @@ -0,0 +1,41 @@ +From 29cbb5d0165397be01a8f54147d2e147d83a5798 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Fri, 5 Aug 2016 16:49:30 -0500 +Subject: [PATCH] Fix: crmd: clear remote node operation history only when it + comes up + +--- + crmd/remote_lrmd_ra.c | 10 ++++------ + 1 file changed, 4 insertions(+), 6 deletions(-) + +diff --git a/crmd/remote_lrmd_ra.c b/crmd/remote_lrmd_ra.c +index c347bb1..02218eb 100644 +--- a/crmd/remote_lrmd_ra.c ++++ b/crmd/remote_lrmd_ra.c +@@ -178,10 +178,9 @@ remote_node_up(const char *node_name) + CRM_CHECK(node_name != NULL, return); + crm_info("Announcing pacemaker_remote node %s", node_name); + +- /* Clear node's operation history and transient attributes. +- * This should and normally will be done when the node leaves, +- * but since remote node state has a number of corner cases, +- * we additionally clear it on startup to be sure. ++ /* Clear node's operation history. The node's transient attributes should ++ * and normally will be cleared when the node leaves, but since remote node ++ * state has a number of corner cases, clear them here as well, to be sure. + */ + call_opt = crmd_cib_smart_opt(); + erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt); +@@ -243,8 +242,7 @@ remote_node_down(const char *node_name) + /* Purge node from attrd's memory */ + update_attrd_remote_node_removed(node_name, NULL); + +- /* Purge node's operation history and transient attributes from CIB */ +- erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt); ++ /* Purge node's transient attributes */ + erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt); + + /* Ensure node is in the remote peer cache with lost state */ +-- +1.8.3.1 + diff --git a/SOURCES/014-crm_report.patch b/SOURCES/014-crm_report.patch new file mode 100644 index 0000000..97303d2 --- /dev/null +++ b/SOURCES/014-crm_report.patch @@ -0,0 +1,223 @@ +From d152f45d874546db7615b5caa54633130ee733bf Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 15 Sep 2016 09:40:48 -0500 +Subject: [PATCH 1/3] Fix: tools: make crm_report sanitize CIB before + generating readable version + +--- + tools/report.collector | 18 +++++++++++++----- + 1 file changed, 13 insertions(+), 5 deletions(-) + +diff --git a/tools/report.collector b/tools/report.collector +index e75a790..83218ee 100644 +--- a/tools/report.collector ++++ b/tools/report.collector +@@ -249,13 +249,17 @@ getconfig() { + else + echo "$host" > $target/STOPPED + fi ++} ++ ++get_readable_cib() { ++ target="$1"; shift; + + if [ -f "$target/$CIB_F" ]; then +- crm_verify -V -x $target/$CIB_F >$target/$CRM_VERIFY_F 2>&1 ++ crm_verify -V -x "$target/$CIB_F" >"$target/$CRM_VERIFY_F" 2>&1 + if which crm >/dev/null 2>&1 ; then +- CIB_file=$target/$CIB_F crm configure show >$target/$CIB_TXT_F 2>&1 ++ CIB_file="$target/$CIB_F" crm configure show >"$target/$CIB_TXT_F" 2>&1 + elif which pcs >/dev/null 2>&1 ; then +- pcs config -f $target/$CIB_F >$target/$CIB_TXT_F 2>&1 ++ pcs config -f "$target/$CIB_F" >"$target/$CIB_TXT_F" 2>&1 + fi + fi + } +@@ -735,12 +739,16 @@ cf="" + if [ ! 
-z "$cluster_cf" ]; then + cf=`basename $cluster_cf` + fi +-for f in $cf $CIB_F $CIB_TXT_F $CIB_F.live pengine/*; do ++for f in "$cf" "$CIB_F" "$CIB_F.live" pengine/*; do + if [ -f "$f" ]; then +- sanitize $f ++ sanitize "$f" + fi + done + ++# For convenience, generate human-readable version of CIB and any XML errors ++# in it (AFTER sanitizing, so we don't need to sanitize this output) ++get_readable_cib "$REPORT_HOME/$REPORT_TARGET" ++ + # Grab logs + start=`date -d @${LOG_START} +"%F %T"` + end=`date -d @${LOG_END} +"%F %T"` +-- +1.8.3.1 + + +From 700c8004fc62ca5de39b1558825ea6aba4edab53 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 15 Sep 2016 10:50:48 -0500 +Subject: [PATCH 2/3] Low: tools: make crm_report skip global disk searches + when pacemaker_remoted present + +This saves a lot of time in the common cases, and the only case where it's +worse is where pacemaker_remoted is installed in a standard location but the +other pacemaker components are not (and if someone manages to do that, it's no +wonder they have problems). +--- + tools/report.common.in | 62 +++++++++++++++++++++++++++++++------------------- + 1 file changed, 39 insertions(+), 23 deletions(-) + +diff --git a/tools/report.common.in b/tools/report.common.in +index f9ed6f5..9b43486 100644 +--- a/tools/report.common.in ++++ b/tools/report.common.in +@@ -82,6 +82,9 @@ SYSLOGS=" + /var/log/cluster/* + " + ++# Whether pacemaker_remoted was found (0 = yes, 1 = no, -1 = haven't looked yet) ++REMOTED_STATUS=-1 ++ + # + # keep the user posted + # +@@ -129,13 +132,23 @@ is_running() { + } + + has_remoted() { +- # TODO: the binary might be elsewhere +- if which pacemaker_remoted >/dev/null 2>&1; then +- return 0 +- elif [ -x "@sbindir@/pacemaker_remoted" ]; then +- return 0 ++ if [ $REMOTED_STATUS -eq -1 ]; then ++ REMOTED_STATUS=1 ++ if which pacemaker_remoted >/dev/null 2>&1; then ++ REMOTED_STATUS=0 ++ elif [ -x "@sbindir@/pacemaker_remoted" ]; then ++ REMOTED_STATUS=0 ++ else ++ # @TODO: the binary might be elsewhere, ++ # but a global search is too expensive ++ for d in /{usr,opt}/{local/,}{s,}bin; do ++ if [ -x "${d}/pacemaker_remoted" ]; then ++ REMOTED_STATUS=0 ++ fi ++ done ++ fi + fi +- return 1 ++ return $REMOTED_STATUS + } + + # found_dir +@@ -158,18 +171,19 @@ detect_daemon_dir() { + fi + done + ++ # Pacemaker Remote nodes don't need to install daemons ++ if has_remoted; then ++ info "Pacemaker daemons not found (this appears to be a Pacemaker Remote node)" ++ return ++ fi ++ + for f in $(find / -maxdepth $maxdepth -type f -name pengine -o -name lrmd_test); do + d=$(dirname "$f") + found_dir "daemons" "$d" + return + done + +- # Pacemaker Remote nodes don't need to install daemons +- if has_remoted; then +- info "Not found (this appears to be a Pacemaker Remote node)" +- else +- fatal "Pacemaker daemons not found (nonstandard installation?)" +- fi ++ fatal "Pacemaker daemons not found (nonstandard installation?)" + } + + detect_cib_dir() { +@@ -180,6 +194,12 @@ detect_cib_dir() { + fi + done + ++ # Pacemaker Remote nodes don't need a CIB ++ if has_remoted; then ++ info "Pacemaker config not found (this appears to be a Pacemaker Remote node)" ++ return ++ fi ++ + info "Searching for where Pacemaker keeps config information... this may take a while" + # TODO: What about false positives where someone copied the CIB? 
+ for f in $(find / -maxdepth $maxdepth -type f -name cib.xml); do +@@ -188,12 +208,7 @@ detect_cib_dir() { + return + done + +- # Pacemaker Remote nodes don't need a CIB +- if has_remoted; then +- info "Not found (this appears to be a Pacemaker Remote node)" +- else +- warning "Pacemaker config not found (nonstandard installation?)" +- fi ++ warning "Pacemaker config not found (nonstandard installation?)" + } + + detect_state_dir() { +@@ -217,17 +232,18 @@ detect_pe_dir() { + return + fi + ++ if has_remoted; then ++ info "Pacemaker policy engine inputs not found (this appears to be a Pacemaker Remote node)" ++ return ++ fi ++ + info "Searching for where Pacemaker keeps Policy Engine inputs... this may take a while" + for d in $(find / -maxdepth $maxdepth -type d -name pengine); do + found_dir "policy engine inputs" "$d" + return + done + +- if has_remoted; then +- info "Not found (this appears to be a Pacemaker Remote node)" +- else +- fatal "Pacemaker policy engine inputs not found (nonstandard installation?)" +- fi ++ fatal "Pacemaker policy engine inputs not found (nonstandard installation?)" + } + + detect_host() { +-- +1.8.3.1 + + +From 03f013076c191c61470a73dd8fa1d6ceae6e5fb1 Mon Sep 17 00:00:00 2001 +From: Ken Gaillot +Date: Thu, 15 Sep 2016 11:46:18 -0500 +Subject: [PATCH 3/3] Doc: tools: document crm_report --sos-mode option in help + text/man page + +--- + tools/crm_report.in | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/tools/crm_report.in b/tools/crm_report.in +index 848b6b9..0a456af 100755 +--- a/tools/crm_report.in ++++ b/tools/crm_report.in +@@ -73,6 +73,8 @@ Options: + -u, --user user ssh username for cluster nodes (default: root) + -D, --depth search depth to use when attempting to locate files + -e, --rsh specify the remote shell to use (default: ssh -T) ++ --sos-mode use defaults suitable for being called by sosreport tool ++ (behavior subject to change and not useful to end users) + --dest a custom destination directory/file + dest a custom destination directory/file + +-- +1.8.3.1 + diff --git a/SOURCES/pacemaker-63f8e9a-rollup.patch b/SOURCES/pacemaker-63f8e9a-rollup.patch deleted file mode 100644 index ef14d87..0000000 --- a/SOURCES/pacemaker-63f8e9a-rollup.patch +++ /dev/null @@ -1,5904 +0,0 @@ -diff --git a/ChangeLog b/ChangeLog -index d70edbd..e445890 100644 ---- a/ChangeLog -+++ b/ChangeLog -@@ -1,4 +1,218 @@ - -+* Wed Jun 24 2015 Andrew Beekhof Pacemaker-1.1.13-1 -+- Update source tarball to revision: 2a1847e -+- Changesets: 750 -+- Diff: 156 files changed, 11323 insertions(+), 3725 deletions(-) -+ -+- Features added since Pacemaker-1.1.12 -+ + Allow fail-counts to be removed en-mass when the new attrd is in operation -+ + attrd supports private attributes (not written to CIB) -+ + crmd: Ensure a watchdog device is in use if stonith-watchdog-timeout is configured -+ + crmd: If configured, trigger the watchdog immediately if we loose quorum and no-quorum-policy=suicide -+ + crm_diff: Support generating a difference without versions details if --no-version/-u is supplied -+ + crm_resource: Implement an intelligent restart capability -+ + Fencing: Advertise the watchdog device for fencing operations -+ + Fencing: Allow the cluster to recover resources if the watchdog is in use -+ + fencing: cl#5134 - Support random fencing delay to avoid double fencing -+ + mcp: Allow orphan children to initiate node panic via SIGQUIT -+ + mcp: Turn on sbd integration if pacemakerd finds it running -+ + mcp: Two new error codes that result in machine reset or power off 
-+ + Officially support the resource-discovery attribute for location constraints -+ + PE: Allow natural ordering of colocation sets -+ + PE: Support non-actionable degraded mode for OCF -+ + pengine: cl#5207 - Display "UNCLEAN" for resources running on unclean offline nodes -+ + remote: pcmk remote client tool for use with container wrapper script -+ + Support machine panics for some kinds of errors (via sbd if available) -+ + tools: add crm_resource --wait option -+ + tools: attrd_updater supports --query and --all options -+ + tools: attrd_updater: Allow attributes to be set for other nodes -+ -+- Changes since Pacemaker-1.1.12 -+ + pengine: exclusive discovery implies rsc is only allowed on exclusive subset of nodes -+ + acl: Correctly implement the 'reference' acl directive -+ + acl: Do not delay evaluation of added nodes in some situations -+ + attrd: b22b1fe did uuid test too early -+ + attrd: Clean out the node cache when requested by the admin -+ + attrd: fixes double free in attrd legacy -+ + attrd: properly write attributes for peers once uuid is discovered -+ + attrd: refresh should force an immediate write-out of all attributes -+ + attrd: Simplify how node deletions happen -+ + Bug rhbz#1067544 - Tools: Correctly handle --ban, --move and --locate for master/slave groups -+ + Bug rhbz#1181824 - Ensure the DC can be reliably fenced -+ + cib: Ability to upgrade cib validation schema in legacy mode -+ + cib: Always generate digests for cib diffs in legacy mode -+ + cib: assignment where comparison intended -+ + cib: Avoid nodeid conflicts we don't care about -+ + cib: Correctly add "update-origin", "update-client" and "update-user" attributes for cib -+ + cib: Correctly set up signal handlers -+ + cib: Correctly track node state -+ + cib: Do not update on disk backups if we're just querying them -+ + cib: Enable cib legacy mode for plugin-based clusters -+ + cib: Ensure file-based backends treat '-o section' consistently with the native backend -+ + cib: Ensure upgrade operations from a non-DC get an acknowledgement -+ + cib: No need to enforce cib digests for v2 diffs in legacy mode -+ + cib: Revert d153b86 to instantly get cib synchronized in legacy mode -+ + cib: tls sock cleanup for remote cib connections -+ + cli: Ensure subsequent unknown long options are correctly detected -+ + cluster: Invoke crm_remove_conflicting_peer() only when the new node's uname is being assigned in the node cache -+ + common: Increment current and age for lib common as a result of APIs being added -+ + corosync: Bug cl#5232 - Somewhat gracefully handle nodes with invalid UUIDs -+ + corosync: Avoid unnecessary repeated CMAP API calls -+ + crmd/pengine: handle on-fail=ignore properly -+ + crmd: Add "on_node" attribute for *_last_failure_0 lrm resource operations -+ + crmd: All peers need to track node shutdown requests -+ + crmd: Cached copies of transient attributes cease to be valid once a node leaves the membership -+ + crmd: Correctly add the local option that validates against schema for pengine to calculate -+ + crmd: Disable debug logging that results in significant overhead -+ + crmd: do not remove connection resources during re-probe -+ + crmd: don't update fail count twice for same failure -+ + crmd: Ensure remote connection resources timeout properly during 'migrate_from' action -+ + crmd: Ensure throttle_mode() does something on Linux -+ + crmd: Fixes crash when remote connection migration fails -+ + crmd: gracefully handle remote node disconnects during op execution -+ + crmd: Handle 
remote connection failures while executing ops on remote connection -+ + crmd: include remote nodes when forcing cluster wide resource reprobe -+ + crmd: never stop recurring monitor ops for pcmk remote during incomplete migration -+ + crmd: Prevent the old version of DC from being fenced when it shuts down for rolling-upgrade -+ + crmd: Prevent use-of-NULL during reprobe -+ + crmd: properly update job limit for baremetal remote-nodes -+ + crmd: Remote-node throttle jobs count towards cluster-node hosting conneciton rsc -+ + crmd: Reset stonith failcount to recover transitioner when the node rejoins -+ + crmd: resolves memory leak in crmd. -+ + crmd: respect start-failure-is-fatal even for artifically injected events -+ + crmd: Wait for all pending operations to complete before poking the policy engine -+ + crmd: When container's host is fenced, cancel in-flight operations -+ + crm_attribute: Correctly update config options when -o crm_config is specified -+ + crm_failcount: Better error reporting when no resource is specified -+ + crm_mon: add exit reason to resource failure output -+ + crm_mon: Fill CRM_notify_node in traps with node's uname rather than node's id if possible -+ + crm_mon: Repair notification delivery when the v2 patch format is in use -+ + crm_node: Correctly remove nodes from the CIB by nodeid -+ + crm_report: More patterns for finding logs on non-DC nodes -+ + crm_resource: Allow resource restart operations to be node specific -+ + crm_resource: avoid deletion of lrm cache on node with resource discovery disabled. -+ + crm_resource: Calculate how long to wait for a restart based on the resource timeouts -+ + crm_resource: Clean up memory in --restart error paths -+ + crm_resource: Display the locations of all anonymous clone children when supplying the children's common ID -+ + crm_resource: Ensure --restart sets/clears meta attributes -+ + crm_resource: Ensure fail-counts are purged when we redetect the state of all resources -+ + crm_resource: Implement --timeout for resource restart operations -+ + crm_resource: Include group members when calculating the next timeout -+ + crm_resource: Memory leak in error paths -+ + crm_resource: Prevent use-after-free -+ + crm_resource: Repair regression test outputs -+ + crm_resource: Use-after-free when restarting a resource -+ + dbus: ref count leaks -+ + dbus: Ensure both the read and write queues get dispatched -+ + dbus: Fail gracefully if malloc fails -+ + dbus: handle dispatch queue when multiple replies need to be processed -+ + dbus: Notice when dbus connections get disabled -+ + dbus: Remove double-free introduced while trying to make coverity shut up -+ + ensure if B is colocated with A, B can never run without A -+ + fence_legacy: Avoid passing 'port' to cluster-glue agents -+ + fencing: Allow nodes to be purged from the member cache -+ + fencing: Correctly make args for fencing agents -+ + fencing: Correctly wait for self-fencing to occur when the watchdog is in use -+ + fencing: Ensure the hostlist parameter is set for watchdog agents -+ + fencing: Force 'stonith-ng' as the system name -+ + fencing: Gracefully handle invalid metadata from agents -+ + fencing: If configured, wait stonith-watchdog-timer seconds for self-fencing to complete -+ + fencing: Reject actions for devices that haven't been explicitly registered yet -+ + ipc: properly allocate server enforced buffer size on client -+ + ipc: use server enforced buffer during ipc client send -+ + lrmd, services: interpret LSB status codes properly -+ + lrmd: add 
back support for class heartbeat agents -+ + lrmd: cancel pending async connection during disconnect -+ + lrmd: enable ipc proxy for docker-wrapper privileged mode -+ + lrmd: fix rescheduling of systemd monitor op during start -+ + lrmd: Handle systemd reporting 'done' before a resource is actually stopped -+ + lrmd: Hint to child processes that using sd_notify is not required -+ + lrmd: Log with the correct personality -+ + lrmd: Prevent glib assert triggered by timers being removed from mainloop more than once -+ + lrmd: report original timeout when systemd operation completes -+ + lrmd: store failed operation exit reason in cib -+ + mainloop: resolves race condition mainloop poll involving modification of ipc connections -+ + make targetted reprobe for remote node work, crm_resource -C -N -+ + mcp: Allow a configurable delay when debugging shutdown issues -+ + mcp: Avoid requiring 'export' for SYS-V sysconfig options -+ + Membership: Detect and resolve nodes that change their ID -+ + pacemakerd: resolves memory leak of xml structure in pacemakerd -+ + pengine: ability to launch resources in isolated containers -+ + pengine: add #kind=remote for baremetal remote-nodes -+ + pengine: allow baremetal remote-nodes to recover without requiring fencing when cluster-node fails -+ + pengine: allow remote-nodes to be placed in maintenance mode -+ + pengine: Avoid trailing whitespaces when printing resource state -+ + pengine: cl#5130 - Choose nodes capable of running all the colocated utilization resources -+ + pengine: cl#5130 - Only check the capacities of the nodes that are allowed to run the resource -+ + pengine: Correctly compare feature set to determine how to unpack meta attributes -+ + pengine: disable migrations for resources with isolation containers -+ + pengine: disable reloading of resources within isolated container wrappers -+ + pengine: Do not aggregate children in a pending state into the started/stopped/etc lists -+ + pengine: Do not record duplicate copies of the failed actions -+ + pengine: Do not reschedule monitors that are no longer needed while resource definitions have changed -+ + pengine: Fence baremetal remote when recurring monitor op fails -+ + pengine: Fix colocation with unmanaged resources -+ + pengine: Fix the behaviors of multi-state resources with asymmetrical ordering -+ + pengine: fixes pengine crash with orphaned remote node connection resource -+ + pengine: fixes segfault caused by malformed log warning -+ + pengine: handle cloned isolated resources in a sane way -+ + pengine: handle isolated resource scenario, cloned group of isolated resources -+ + pengine: Handle ordering between stateful and migratable resources -+ + pengine: imply stop in container node resources when host node is fenced -+ + pengine: only fence baremetal remote when connection can fails or can not be recovered -+ + pengine: only kill process group on timeout when on-fail does not equal block. 
-+ + pengine: per-node control over resource discovery -+ + pengine: prefer migration target for remote node connections -+ + pengine: prevent disabling rsc discovery per node in certain situations -+ + pengine: Prevent use-after-free in sort_rsc_process_order() -+ + pengine: properly handle ordering during remote connection partial migration -+ + pengine: properly recover remote-nodes when cluster-node proxy goes offline -+ + pengine: remove unnecessary whitespace from notify environment variables -+ + pengine: require-all feature for ordered clones -+ + pengine: Resolve memory leaks -+ + pengine: resource discovery mode for location constraints -+ + pengine: restart master instances on instance attribute changes -+ + pengine: Turn off legacy unpacking of resource options into the meta hashtable -+ + pengine: Watchdog integration is sufficient for fencing -+ + Perform systemd reloads asynchronously -+ + ping: Correctly advertise multiplier default -+ + Prefer to inherit the watchdog timeout from SBD -+ + properly record stop args after reload -+ + provide fake meta data for ra class heartbeat -+ + remote: report timestamps for remote connection resource operations -+ + remote: Treat recv msg timeout as a disconnect -+ + service: Prevent potential use-of-NULL in metadata lookups -+ + solaris: Allow compilation when dirent.d_type is not available -+ + solaris: Correctly replace the linux swab functions -+ + solaris: Disable throttling since /proc doesn't exist -+ + stonith-ng: Correctly observe the watchdog completion timeout -+ + stonith-ng: Correctly track node state -+ + stonith-ng: Reset mainloop source IDs after removing them -+ + systemd: Correctly handle long running stop actions -+ + systemd: Ensure failed monitor operations always return -+ + systemd: Ensure we don't call dbus_message_unref() with NULL -+ + systemd: fix crash caused when canceling in-flight operation -+ + systemd: Kindly ask dbus NOT to kill the process if the dbus connection fails -+ + systemd: Perform actions asynchronously -+ + systemd: Perform monitor operations without blocking -+ + systemd: Tell systemd not to take DBus down from underneath us -+ + systemd: Trick systemd into not stopping our services before us during shutdown -+ + tools: Improve crm_mon output with certain option combinations -+ + upstart: Monitor actions always return 'ok' or 'not running' -+ + upstart: Perform more parts of monitor operations without blocking -+ + xml: add 'require-all' to xml schema for constraints -+ + xml: cl#5231 - Unset the deleted attributes in the resulting diffs -+ + xml: Clone the latest constraint schema in preparation for changes" -+ + xml: Correctly create v1 patchsets when deleting attributes -+ + xml: Do not change the ordering of properties when applying v1 cib diffs -+ + xml: Do not dump deleted attributes -+ + xml: Do not prune leaves from v1 cib diffs that are being created with digests -+ + xml: Ensure ACLs are reapplied before calculating what a replace operation changed -+ + xml: Fix upgrade-1.3.xsl to correctly transform ACL rules with "attribute" -+ + xml: Prevent assert errors in crm_element_value() on applying a patch without version information -+ + xml: Prevent potential use-of-NULL -+ -+ - * Tue Jul 22 2014 Andrew Beekhof Pacemaker-1.1.12-1 - - Update source tarball to revision: 93a037d - - Changesets: 795 -diff --git a/attrd/commands.c b/attrd/commands.c -index 442c5f8..18c0523 100644 ---- a/attrd/commands.c -+++ b/attrd/commands.c -@@ -289,6 +289,9 @@ attrd_client_update(xmlNode *xml) - - 
crm_info("Expanded %s=%s to %d", attr, value, int_value); - crm_xml_add_int(xml, F_ATTRD_VALUE, int_value); -+ -+ /* Replacing the value frees the previous memory, so re-query it */ -+ value = crm_element_value(xml, F_ATTRD_VALUE); - } - } - -diff --git a/cib/callbacks.c b/cib/callbacks.c -index 71c487e..1452ded 100644 ---- a/cib/callbacks.c -+++ b/cib/callbacks.c -@@ -40,6 +40,8 @@ - #include - #include "common.h" - -+static unsigned long cib_local_bcast_num = 0; -+ - typedef struct cib_local_notify_s { - xmlNode *notify_src; - char *client_id; -@@ -48,7 +50,13 @@ typedef struct cib_local_notify_s { - } cib_local_notify_t; - - int next_client_id = 0; -+ -+#if SUPPORT_PLUGIN -+gboolean legacy_mode = TRUE; -+#else - gboolean legacy_mode = FALSE; -+#endif -+ - qb_ipcs_service_t *ipcs_ro = NULL; - qb_ipcs_service_t *ipcs_rw = NULL; - qb_ipcs_service_t *ipcs_shm = NULL; -@@ -82,8 +90,12 @@ static gboolean cib_read_legacy_mode(void) - return legacy; - } - --static gboolean cib_legacy_mode(void) -+gboolean cib_legacy_mode(void) - { -+#if SUPPORT_PLUGIN -+ return TRUE; -+#endif -+ - if(cib_read_legacy_mode()) { - return TRUE; - } -@@ -442,6 +454,54 @@ do_local_notify(xmlNode * notify_src, const char *client_id, - } - - static void -+local_notify_destroy_callback(gpointer data) -+{ -+ cib_local_notify_t *notify = data; -+ -+ free_xml(notify->notify_src); -+ free(notify->client_id); -+ free(notify); -+} -+ -+static void -+check_local_notify(int bcast_id) -+{ -+ cib_local_notify_t *notify = NULL; -+ -+ if (!local_notify_queue) { -+ return; -+ } -+ -+ notify = g_hash_table_lookup(local_notify_queue, GINT_TO_POINTER(bcast_id)); -+ -+ if (notify) { -+ do_local_notify(notify->notify_src, notify->client_id, notify->sync_reply, -+ notify->from_peer); -+ g_hash_table_remove(local_notify_queue, GINT_TO_POINTER(bcast_id)); -+ } -+} -+ -+static void -+queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_reply, -+ gboolean from_peer) -+{ -+ cib_local_notify_t *notify = calloc(1, sizeof(cib_local_notify_t)); -+ -+ notify->notify_src = notify_src; -+ notify->client_id = strdup(client_id); -+ notify->sync_reply = sync_reply; -+ notify->from_peer = from_peer; -+ -+ if (!local_notify_queue) { -+ local_notify_queue = g_hash_table_new_full(g_direct_hash, -+ g_direct_equal, NULL, -+ local_notify_destroy_callback); -+ } -+ -+ g_hash_table_insert(local_notify_queue, GINT_TO_POINTER(cib_local_bcast_num), notify); -+} -+ -+static void - parse_local_options_v1(crm_client_t * cib_client, int call_type, int call_options, const char *host, - const char *op, gboolean * local_notify, gboolean * needs_reply, - gboolean * process, gboolean * needs_forward) -@@ -814,9 +874,12 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb - int diff_del_admin_epoch = 0; - - const char *digest = NULL; -+ int format = 1; - - CRM_LOG_ASSERT(result_diff != NULL); - digest = crm_element_value(result_diff, XML_ATTR_DIGEST); -+ crm_element_value_int(result_diff, "format", &format); -+ - cib_diff_version_details(result_diff, - &diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates, - &diff_del_admin_epoch, &diff_del_epoch, &diff_del_updates); -@@ -829,7 +892,9 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb - crm_xml_add(msg, F_CIB_GLOBAL_UPDATE, XML_BOOLEAN_TRUE); - crm_xml_add(msg, F_CIB_OPERATION, CIB_OP_APPLY_DIFF); - -- CRM_ASSERT(digest != NULL); -+ if (format == 1) { -+ CRM_ASSERT(digest != NULL); -+ } - - add_message_xml(msg, F_CIB_UPDATE_DIFF, 
result_diff); - crm_log_xml_explicit(msg, "copy"); -@@ -1039,6 +1104,27 @@ cib_process_request(xmlNode * request, gboolean force_synchronous, gboolean priv - */ - crm_trace("Completed slave update"); - -+ } else if (cib_legacy_mode() && -+ rc == pcmk_ok && result_diff != NULL && !(call_options & cib_inhibit_bcast)) { -+ gboolean broadcast = FALSE; -+ -+ cib_local_bcast_num++; -+ crm_xml_add_int(request, F_CIB_LOCAL_NOTIFY_ID, cib_local_bcast_num); -+ broadcast = send_peer_reply(request, result_diff, originator, TRUE); -+ -+ if (broadcast && client_id && local_notify && op_reply) { -+ -+ /* If we have been asked to sync the reply, -+ * and a bcast msg has gone out, we queue the local notify -+ * until we know the bcast message has been received */ -+ local_notify = FALSE; -+ crm_trace("Queuing local %ssync notification for %s", -+ (call_options & cib_sync_call) ? "" : "a-", client_id); -+ -+ queue_local_notify(op_reply, client_id, (call_options & cib_sync_call), from_peer); -+ op_reply = NULL; /* the reply is queued, so don't free here */ -+ } -+ - } else if (call_options & cib_discard_reply) { - crm_trace("Caller isn't interested in reply"); - -@@ -1322,6 +1408,11 @@ cib_peer_callback(xmlNode * msg, void *private_data) - - if (cib_legacy_mode() && (originator == NULL || crm_str_eq(originator, cib_our_uname, TRUE))) { - /* message is from ourselves */ -+ int bcast_id = 0; -+ -+ if (!(crm_element_value_int(msg, F_CIB_LOCAL_NOTIFY_ID, &bcast_id))) { -+ check_local_notify(bcast_id); -+ } - return; - - } else if (crm_peer_cache == NULL) { -diff --git a/cib/callbacks.h b/cib/callbacks.h -index 7549a6c..bca9992 100644 ---- a/cib/callbacks.h -+++ b/cib/callbacks.h -@@ -73,6 +73,8 @@ void cib_shutdown(int nsig); - void initiate_exit(void); - void terminate_cib(const char *caller, gboolean fast); - -+extern gboolean cib_legacy_mode(void); -+ - #if SUPPORT_HEARTBEAT - extern void cib_ha_peer_callback(HA_Message * msg, void *private_data); - extern int cib_ccm_dispatch(gpointer user_data); -diff --git a/cib/main.c b/cib/main.c -index 2a48054..e20a2b6 100644 ---- a/cib/main.c -+++ b/cib/main.c -@@ -438,6 +438,13 @@ cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const voi - - if (cib_shutdown_flag && crm_active_peers() < 2 && crm_hash_table_size(client_connections) == 0) { - crm_info("No more peers"); -+ /* @TODO -+ * terminate_cib() calls crm_cluster_disconnect() which calls -+ * crm_peer_destroy() which destroys the peer caches, which a peer -+ * status callback shouldn't do. For now, there is a workaround in -+ * crm_update_peer_proc(), but CIB should be refactored to avoid -+ * destroying the peer caches here. 
-+ */ - terminate_cib(__FUNCTION__, FALSE); - } - } -diff --git a/cib/messages.c b/cib/messages.c -index 9c66349..363562c 100644 ---- a/cib/messages.c -+++ b/cib/messages.c -@@ -297,7 +297,14 @@ cib_process_upgrade_server(const char *op, int options, const char *section, xml - crm_xml_add(up, F_CIB_CALLOPTS, crm_element_value(req, F_CIB_CALLOPTS)); - crm_xml_add(up, F_CIB_CALLID, crm_element_value(req, F_CIB_CALLID)); - -- send_cluster_message(NULL, crm_msg_cib, up, FALSE); -+ if (cib_legacy_mode() && cib_is_master) { -+ rc = cib_process_upgrade( -+ op, options, section, up, input, existing_cib, result_cib, answer); -+ -+ } else { -+ send_cluster_message(NULL, crm_msg_cib, up, FALSE); -+ } -+ - free_xml(up); - - } else if(rc == pcmk_ok) { -diff --git a/crmd/lrm.c b/crmd/lrm.c -index 74fede4..062f769 100644 ---- a/crmd/lrm.c -+++ b/crmd/lrm.c -@@ -454,8 +454,6 @@ get_rsc_metadata(const char *type, const char *rclass, const char *provider, boo - - snprintf(key, len, "%s::%s:%s", type, rclass, provider); - if(force == FALSE) { -- snprintf(key, len, "%s::%s:%s", type, rclass, provider); -- - crm_trace("Retreiving cached metadata for %s", key); - metadata = g_hash_table_lookup(metadata_hash, key); - } -@@ -581,7 +579,7 @@ resource_supports_action(xmlNode *metadata, const char *name) - for (action = __xml_first_child(actions); action != NULL; action = __xml_next(action)) { - if (crm_str_eq((const char *)action->name, "action", TRUE)) { - value = crm_element_value(action, "name"); -- if (safe_str_eq("reload", value)) { -+ if (safe_str_eq(name, value)) { - return TRUE; - } - } -@@ -606,16 +604,18 @@ append_restart_list(lrmd_event_data_t *op, xmlNode *metadata, xmlNode * update, - - if(resource_supports_action(metadata, "reload")) { - restart = create_xml_node(NULL, XML_TAG_PARAMS); -- list = build_parameter_list(op, metadata, restart, "unique", FALSE, FALSE); -- } -+ /* Any parameters with unique="1" should be added into the "op-force-restart" list. */ -+ list = build_parameter_list(op, metadata, restart, "unique", TRUE, FALSE); - -- if (list == NULL) { -+ } else { - /* Resource does not support reloads */ - return; - } - - digest = calculate_operation_digest(restart, version); -- crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list); -+ /* Add "op-force-restart" and "op-restart-digest" to indicate the resource supports reload, -+ * no matter if it actually supports any parameters with unique="1"). */ -+ crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list? list: ""); - crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest); - - crm_trace("%s: %s, %s", op->rsc_id, digest, list); -diff --git a/crmd/throttle.c b/crmd/throttle.c -index 165050c..169594b 100644 ---- a/crmd/throttle.c -+++ b/crmd/throttle.c -@@ -92,41 +92,60 @@ int throttle_num_cores(void) - return cores; - } - -+/* -+ * \internal -+ * \brief Return name of /proc file containing the CIB deamon's load statistics -+ * -+ * \return Newly allocated memory with file name on success, NULL otherwise -+ * -+ * \note It is the caller's responsibility to free the return value. -+ * This will return NULL if the daemon is being run via valgrind. -+ * This should be called only on Linux systems. 
-+ */ - static char *find_cib_loadfile(void) - { - DIR *dp; - struct dirent *entry; - struct stat statbuf; - char *match = NULL; -+ char procpath[128]; -+ char value[64]; -+ char key[16]; - - dp = opendir("/proc"); - if (!dp) { - /* no proc directory to search through */ - crm_notice("Can not read /proc directory to track existing components"); -- return FALSE; -+ return NULL; - } - -+ /* Iterate through contents of /proc */ - while ((entry = readdir(dp)) != NULL) { -- char procpath[128]; -- char value[64]; -- char key[16]; - FILE *file; - int pid; - -- strcpy(procpath, "/proc/"); -- /* strlen("/proc/") + strlen("/status") + 1 = 14 -- * 128 - 14 = 114 */ -- strncat(procpath, entry->d_name, 114); -- -- if (lstat(procpath, &statbuf)) { -+ /* We're only interested in entries whose name is a PID, -+ * so skip anything non-numeric or that is too long. -+ * -+ * 114 = 128 - strlen("/proc/") - strlen("/status") - 1 -+ */ -+ pid = atoi(entry->d_name); -+ if ((pid <= 0) || (strlen(entry->d_name) > 114)) { - continue; - } -- if (!S_ISDIR(statbuf.st_mode) || !isdigit(entry->d_name[0])) { -+ -+ /* We're only interested in subdirectories */ -+ strcpy(procpath, "/proc/"); -+ strcat(procpath, entry->d_name); -+ if (lstat(procpath, &statbuf) || !S_ISDIR(statbuf.st_mode)) { - continue; - } - -+ /* Read the first entry ("Name:") from the process's status file. -+ * We could handle the valgrind case if we parsed the cmdline file -+ * instead, but that's more of a pain than it's worth. -+ */ - strcat(procpath, "/status"); -- - file = fopen(procpath, "r"); - if (!file) { - continue; -@@ -137,17 +156,11 @@ static char *find_cib_loadfile(void) - } - fclose(file); - -- if (safe_str_neq("cib", value)) { -- continue; -- } -- -- pid = atoi(entry->d_name); -- if (pid <= 0) { -- continue; -+ if (safe_str_eq("cib", value)) { -+ /* We found the CIB! 
*/ -+ match = crm_strdup_printf("/proc/%d/stat", pid); -+ break; - } -- -- match = crm_strdup_printf("/proc/%d/stat", pid); -- break; - } - - closedir(dp); -@@ -214,6 +227,10 @@ static bool throttle_cib_load(float *load) - last_utime = 0; - last_stime = 0; - loadfile = find_cib_loadfile(); -+ if (loadfile == NULL) { -+ crm_warn("Couldn't find CIB load file"); -+ return FALSE; -+ } - ticks_per_s = sysconf(_SC_CLK_TCK); - crm_trace("Found %s", loadfile); - } -diff --git a/cts/CIB.py b/cts/CIB.py -index cdfc7ca..82d02d7 100644 ---- a/cts/CIB.py -+++ b/cts/CIB.py -@@ -312,7 +312,7 @@ Description=Dummy resource that takes a while to start - Type=notify - ExecStart=/usr/bin/python -c 'import time, systemd.daemon; time.sleep(10); systemd.daemon.notify("READY=1"); time.sleep(86400)' - ExecStop=/bin/sleep 10 --ExecStop=/bin/kill -s KILL $MAINPID -+ExecStop=/bin/kill -s KILL \$MAINPID - """ - - os.system("cat <<-END >/tmp/DummySD.service\n%s\nEND" % (dummy_service_file)) -diff --git a/cts/CTStests.py b/cts/CTStests.py -index 14ab4bf..f817004 100644 ---- a/cts/CTStests.py -+++ b/cts/CTStests.py -@@ -1105,7 +1105,7 @@ class MaintenanceMode(CTSTest): - # fail the resource right after turning Maintenance mode on - # verify it is not recovered until maintenance mode is turned off - if action == "On": -- pats.append("pengine.*: warning: Processing failed op %s for %s on" % (self.action, self.rid)) -+ pats.append("pengine.*: warning:.* Processing failed op %s for %s on" % (self.action, self.rid)) - else: - pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0")) - pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "start_0")) -@@ -1314,7 +1314,8 @@ class ResourceRecover(CTSTest): - self.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id)) - - pats = [] -- pats.append("pengine.*: warning: Processing failed op %s for %s on" % (self.action, self.rid)) -+ pats.append(r"pengine.*: warning:.* Processing failed op %s for (%s|%s) on" % (self.action, -+ rsc.id, rsc.clone_id)) - - if rsc.managed(): - pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0")) -@@ -2647,32 +2648,31 @@ class RemoteDriver(CTSTest): - self.remote_node_added = 0 - self.remote_rsc_added = 0 - self.remote_rsc = "remote-rsc" -+ self.remote_use_reconnect_interval = self.Env.RandomGen.choice(["true","false"]) - self.cib_cmd = """cibadmin -C -o %s -X '%s' """ - -- def del_rsc(self, node, rsc): -- -+ def get_othernode(self, node): - for othernode in self.Env["nodes"]: - if othernode == node: - # we don't want to try and use the cib that we just shutdown. - # find a cluster node that is not our soon to be remote-node. - continue -- rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) -- if rc != 0: -- self.fail_string = ("Removal of resource '%s' failed" % (rsc)) -- self.failed = 1 -- return -+ else: -+ return othernode -+ -+ def del_rsc(self, node, rsc): -+ othernode = self.get_othernode(node) -+ rc = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc)) -+ if rc != 0: -+ self.fail_string = ("Removal of resource '%s' failed" % (rsc)) -+ self.failed = 1 - - def add_rsc(self, node, rsc_xml): -- for othernode in self.CM.Env["nodes"]: -- if othernode == node: -- # we don't want to try and use the cib that we just shutdown. -- # find a cluster node that is not our soon to be remote-node. 
-- continue -- rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) -- if rc != 0: -- self.fail_string = "resource creation failed" -- self.failed = 1 -- return -+ othernode = self.get_othernode(node) -+ rc = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml)) -+ if rc != 0: -+ self.fail_string = "resource creation failed" -+ self.failed = 1 - - def add_primitive_rsc(self, node): - rsc_xml = """ -@@ -2687,7 +2687,24 @@ class RemoteDriver(CTSTest): - self.remote_rsc_added = 1 - - def add_connection_rsc(self, node): -- rsc_xml = """ -+ if self.remote_use_reconnect_interval == "true": -+ # use reconnect interval and make sure to set cluster-recheck-interval as well. -+ rsc_xml = """ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+""" % (self.remote_node, node) -+ self.rsh(self.get_othernode(node), self.templates["SetCheckInterval"] % ("45s")) -+ else: -+ # not using reconnect interval -+ rsc_xml = """ - - - -@@ -2698,6 +2715,7 @@ class RemoteDriver(CTSTest): - - - """ % (self.remote_node, node) -+ - self.add_rsc(node, rsc_xml) - if self.failed == 0: - self.remote_node_added = 1 -@@ -2836,7 +2854,7 @@ class RemoteDriver(CTSTest): - self.CM.ns.WaitForNodeToComeUp(node, 120); - - pats = [ ] -- watch = self.create_watch(pats, 120) -+ watch = self.create_watch(pats, 200) - watch.setwatch() - pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "start")) - if self.remote_rsc_added == 1: -@@ -2927,12 +2945,19 @@ class RemoteDriver(CTSTest): - pats.append(self.templates["Pat:RscOpOK"] % (self.remote_node, "stop")) - - self.set_timer("remoteMetalCleanup") -+ -+ if self.remote_use_reconnect_interval == "true": -+ self.debug("Cleaning up re-check interval") -+ self.rsh(self.get_othernode(node), self.templates["ClearCheckInterval"]) - if self.remote_rsc_added == 1: -+ self.debug("Cleaning up dummy rsc put on remote node") - self.rsh(node, "crm_resource -U -r %s -N %s" % (self.remote_rsc, self.remote_node)) - self.del_rsc(node, self.remote_rsc) - if self.remote_node_added == 1: -+ self.debug("Cleaning up remote node connection resource") - self.rsh(node, "crm_resource -U -r %s" % (self.remote_node)) - self.del_rsc(node, self.remote_node) -+ - watch.lookforall() - self.log_timer("remoteMetalCleanup") - -diff --git a/cts/environment.py b/cts/environment.py -index 6edf331..a3399c3 100644 ---- a/cts/environment.py -+++ b/cts/environment.py -@@ -160,7 +160,7 @@ class Environment: - self.data["Stack"] = "heartbeat" - - elif name == "openais" or name == "ais" or name == "whitetank": -- self.data["Stack"] = "openais (whitetank)" -+ self.data["Stack"] = "corosync (plugin v0)" - - elif name == "corosync" or name == "cs" or name == "mcp": - self.data["Stack"] = "corosync 2.x" -@@ -351,6 +351,10 @@ class Environment: - self["DoFencing"]=1 - elif args[i+1] == "0" or args[i+1] == "no": - self["DoFencing"]=0 -+ elif args[i+1] == "phd": -+ self["DoStonith"]=1 -+ self["stonith-type"] = "fence_phd_kvm" -+ self["stonith-params"] = "pcmk_arg_map=domain:uname,delay=0" - elif args[i+1] == "rhcs" or args[i+1] == "xvm" or args[i+1] == "virt": - self["DoStonith"]=1 - self["stonith-type"] = "fence_xvm" -diff --git a/cts/patterns.py b/cts/patterns.py -index 8398c7e..1bc05a6 100644 ---- a/cts/patterns.py -+++ b/cts/patterns.py -@@ -32,6 +32,9 @@ class BasePatterns: - - "UUIDQueryCmd" : "crmadmin -N", - -+ "SetCheckInterval" : "cibadmin --modify -c --xml-text ''", -+ "ClearCheckInterval" : "cibadmin --delete --xpath \"//nvpair[@name='cluster-recheck-interval']\"", -+ - "MaintenanceModeOn" : "cibadmin --modify -c 
--xml-text ''", - "MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"", - -@@ -291,6 +294,9 @@ class crm_cs_v0(BasePatterns): - r"error:.*Connection to cib_shm failed", - r"error:.*Connection to cib_shm.* closed", - r"error:.*STONITH connection failed", -+ r"error: Connection to stonith-ng failed", -+ r"crit: Fencing daemon connection failed", -+ r"error: Connection to stonith-ng.* closed", - ] - - self.components["corosync"] = [ -diff --git a/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt b/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt -index 02525d6..a3c02cb 100644 ---- a/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt -+++ b/doc/Pacemaker_Explained/en-US/Ch-Stonith.txt -@@ -343,7 +343,7 @@ http://www.clusterlabs.org/doc/[Clusters from Scratch] guide for those details. - # cibadmin -C -o resources --xml-file stonith.xml - ---- - --. Set stonith-enabled to true: -+. Set +stonith-enabled+ to true: - + - ---- - # crm_attribute -t crm_config -n stonith-enabled -v true -@@ -831,3 +831,29 @@ Put together, the configuration looks like this: - - - ---- -+ -+== Remapping Reboots == -+ -+When the cluster needs to reboot a node, whether because +stonith-action+ is +reboot+ or because -+a reboot was manually requested (such as by `stonith_admin --reboot`), it will remap that to -+other commands in two cases: -+ -+. If the chosen fencing device does not support the +reboot+ command, the cluster -+ will ask it to perform +off+ instead. -+ -+. If a fencing topology level with multiple devices must be executed, the cluster -+ will ask all the devices to perform +off+, then ask the devices to perform +on+. -+ -+To understand the second case, consider the example of a node with redundant -+power supplies connected to intelligent power switches. Rebooting one switch -+and then the other would have no effect on the node. Turning both switches off, -+and then on, actually reboots the node. -+ -+In such a case, the fencing operation will be treated as successful as long as -+the +off+ commands succeed, because then it is safe for the cluster to recover -+any resources that were on the node. Timeouts and errors in the +on+ phase will -+be logged but ignored. -+ -+When a reboot operation is remapped, any action-specific timeout for the -+remapped action will be used (for example, +pcmk_off_timeout+ will be used when -+executing the +off+ command, not +pcmk_reboot_timeout+). -diff --git a/doc/asciidoc.reference b/doc/asciidoc.reference -index a9a171b..9323864 100644 ---- a/doc/asciidoc.reference -+++ b/doc/asciidoc.reference -@@ -1,31 +1,49 @@ -+= Single-chapter part of the documentation = -+ -+== Go-to reference chapter for how we use AsciiDoc on this project == -+ -+[NOTE] -+====== -+This is *not* an attempt for fully self-hosted AsciiDoc document, -+consider it a plaintext full of AsciiDoc samples (it's up to the reader -+to recognize the borderline) at documentation writers' disposal -+to somewhat standardize the style{empty}footnote:[ -+ style of both source notation and final visual appearance -+]. 
-+ - See also: - http://powerman.name/doc/asciidoc -+====== - --Commands: `some-tool --with option` --Files: '/tmp/file.name' --Italic: _some text_ -+Emphasis: _some test_ - Mono: +some text+ --Bold: *some text* --Super: ^some text^ --Sub: ~some text~ -+Strong: *some text* -+Super: ^some text^ -+Sub: ~some text~ - Quotes: - ``double quoted'' - `single quoted' - --Tool: command -+Command: `some-tool --with option` -+Newly introduced term: -+ 'some text' (another form of emphasis as of this edit) -+ -+File: mono - Literal: mono -+Tool: command -+Option: mono -+Replaceable: emphasis mono - Varname: mono --Option: italic --Emphasis: italic bold --Replaceable: italic mono -+Term encountered on system (e.g., menu choice, hostname): -+ strong - - --.Title for Eaxmple -+.Title for Example - ===== - Some text - ===== - --.Title for Eaxmple with XML Listing -+.Title for Example with XML Listing - ===== - [source,XML] - ----- -@@ -49,4 +67,4 @@ Section anchors: - - References to section anchors: - --<> or <> -\ No newline at end of file -+<> or <> -diff --git a/doc/shared/en-US/pacemaker-intro.txt b/doc/shared/en-US/pacemaker-intro.txt -index bf432fc..6b898c9 100644 ---- a/doc/shared/en-US/pacemaker-intro.txt -+++ b/doc/shared/en-US/pacemaker-intro.txt -@@ -1,41 +1,62 @@ - --== What Is Pacemaker? == -+== What Is 'Pacemaker'? == - --Pacemaker is a cluster resource manager. -+Pacemaker is a 'cluster resource manager', that is, a logic responsible -+for a life-cycle of deployed software -- indirectly perhaps even whole -+systems or their interconnections -- under its control within a set of -+computers (a.k.a. 'cluster nodes', 'nodes' for short) and driven by -+prescribed rules. - - It achieves maximum availability for your cluster services --(aka. resources) by detecting and recovering from node- and -+(a.k.a. 'resources') by detecting and recovering from node- and - resource-level failures by making use of the messaging and membership - capabilities provided by your preferred cluster infrastructure (either - http://www.corosync.org/[Corosync] or --http://linux-ha.org/wiki/Heartbeat[Heartbeat]). -+http://linux-ha.org/wiki/Heartbeat[Heartbeat]), and possibly by -+utilizing other parts of the overall cluster stack. -+ -+.High Availability Clusters -+[NOTE] -+For *the goal of minimal downtime* a term 'high availability' was coined -+and together with its acronym, 'HA', is well-established in the sector. -+To differentiate this sort of clusters from high performance computing -+('HPC') ones, should a context require it (apparently, not the case in -+this document), using 'HA cluster' is an option. - - Pacemaker's key features include: - - * Detection and recovery of node and service-level failures - * Storage agnostic, no requirement for shared storage - * Resource agnostic, anything that can be scripted can be clustered -- * Supports fencing (aka. 
STONITH) for ensuring data integrity -+ * Supports 'fencing' (also referred to as the 'STONITH' acronym, -+ <> later on) for ensuring data integrity - * Supports large and small clusters - * Supports both quorate and resource-driven clusters - * Supports practically any redundancy configuration -- * Automatically replicated configuration that can be updated from any node -- * Ability to specify cluster-wide service ordering, colocation and anti-colocation -+ * Automatically replicated configuration that can be updated -+ from any node -+ * Ability to specify cluster-wide service ordering, -+ colocation and anti-colocation - * Support for advanced service types - ** Clones: for services which need to be active on multiple nodes -- ** Multi-state: for services with multiple modes (eg. master/slave, primary/secondary) -- * Unified, scriptable, cluster management tools. -+ ** Multi-state: for services with multiple modes -+ (e.g. master/slave, primary/secondary) -+ * Unified, scriptable cluster management tools - - == Pacemaker Architecture == - - At the highest level, the cluster is made up of three pieces: - -- * Non-cluster-aware components. These pieces -+ * *Non-cluster-aware components*. These pieces - include the resources themselves; scripts that start, stop and - monitor them; and a local daemon that masks the differences - between the different standards these scripts implement. -+ Even though interactions of these resources when run as multiple -+ instances can resemble a distributed system, they still lack -+ the proper HA mechanisms and/or autonomous cluster-wide governance -+ as subsumed in the following item. - -- * Resource management. Pacemaker provides the brain that processes -+ * *Resource management*. Pacemaker provides the brain that processes - and reacts to events regarding the cluster. These events include - nodes joining or leaving the cluster; resource events caused by - failures, maintenance and scheduled activities; and other -@@ -44,21 +65,24 @@ At the highest level, the cluster is made up of three pieces: - events. This may include moving resources, stopping nodes and even - forcing them offline with remote power switches. - -- * Low-level infrastructure. Projects like Corosync, CMAN and -- Heartbeat provide reliable messaging, membership and quorum -+ * *Low-level infrastructure*. Projects like 'Corosync', 'CMAN' and -+ 'Heartbeat' provide reliable messaging, membership and quorum - information about the cluster. - - When combined with Corosync, Pacemaker also supports popular open --source cluster filesystems. --footnote:[Even though Pacemaker also supports Heartbeat, the filesystems need --to use the stack for messaging and membership, and Corosync seems to be --what they're standardizing on. Technically, it would be possible for them to --support Heartbeat as well, but there seems little interest in this.] -+source cluster filesystems.{empty}footnote:[ -+ Even though Pacemaker also supports Heartbeat, the filesystems need to -+ use the stack for messaging and membership, and Corosync seems to be -+ what they're standardizing on. Technically, it would be possible for -+ them to support Heartbeat as well, but there seems little interest -+ in this. -+] - - Due to past standardization within the cluster filesystem community, --cluster filesystems make use of a common distributed lock manager, which makes --use of Corosync for its messaging and membership capabilities (which nodes --are up/down) and Pacemaker for fencing services. 
-+cluster filesystems make use of a common 'distributed lock manager', -+which makes use of Corosync for its messaging and membership -+capabilities (which nodes are up/down) and Pacemaker for fencing -+services. - - .The Pacemaker Stack - image::images/pcmk-stack.png["The Pacemaker stack",width="10cm",height="7.5cm",align="center"] -@@ -67,75 +91,79 @@ image::images/pcmk-stack.png["The Pacemaker stack",width="10cm",height="7.5cm",a - - Pacemaker itself is composed of five key components: - -- * Cluster Information Base (CIB) -- * Cluster Resource Management daemon (CRMd) -- * Local Resource Management daemon (LRMd) -- * Policy Engine (PEngine or PE) -- * Fencing daemon (STONITHd) -+ * 'Cluster Information Base' ('CIB') -+ * 'Cluster Resource Management daemon' ('CRMd') -+ * 'Local Resource Management daemon' ('LRMd') -+ * 'Policy Engine' ('PEngine' or 'PE') -+ * Fencing daemon ('STONITHd') - - .Internal Components - image::images/pcmk-internals.png["Subsystems of a Pacemaker cluster",align="center",scaledwidth="65%"] - - The CIB uses XML to represent both the cluster's configuration and - current state of all resources in the cluster. The contents of the CIB --are automatically kept in sync across the entire cluster and are used --by the PEngine to compute the ideal state of the cluster and how it --should be achieved. -+are automatically kept in sync across the entire cluster and are used by -+the PEngine to compute the ideal state of the cluster and how it should -+be achieved. - --This list of instructions is then fed to the Designated --Controller (DC). Pacemaker centralizes all cluster decision making by --electing one of the CRMd instances to act as a master. Should the --elected CRMd process (or the node it is on) fail, a new one is --quickly established. -+This list of instructions is then fed to the 'Designated Controller' -+('DC'). Pacemaker centralizes all cluster decision making by electing -+one of the CRMd instances to act as a master. Should the elected CRMd -+process (or the node it is on) fail, a new one is quickly established. - - The DC carries out the PEngine's instructions in the required order by - passing them to either the Local Resource Management daemon (LRMd) or - CRMd peers on other nodes via the cluster messaging infrastructure - (which in turn passes them on to their LRMd process). - --The peer nodes all report the results of their operations back to the --DC and, based on the expected and actual results, will either execute --any actions that needed to wait for the previous one to complete, or --abort processing and ask the PEngine to recalculate the ideal cluster --state based on the unexpected results. -+The peer nodes all report the results of their operations back to the DC -+and, based on the expected and actual results, will either execute any -+actions that needed to wait for the previous one to complete, or abort -+processing and ask the PEngine to recalculate the ideal cluster state -+based on the unexpected results. - - In some cases, it may be necessary to power off nodes in order to - protect shared data or complete resource recovery. For this, Pacemaker - comes with STONITHd. - --STONITH is an acronym for Shoot-The-Other-Node-In-The-Head and is --usually implemented with a remote power switch. 
-+[[s-intro-stonith]] -+.STONITH -+[NOTE] -+*STONITH* is an acronym for 'Shoot-The-Other-Node-In-The-Head', -+a recommended practice that misbehaving node is best to be promptly -+'fenced' (shut off, cut from shared resources or otherwise immobilized), -+and is usually implemented with a remote power switch. - - In Pacemaker, STONITH devices are modeled as resources (and configured - in the CIB) to enable them to be easily monitored for failure, however --STONITHd takes care of understanding the STONITH topology such that --its clients simply request a node be fenced, and it does the rest. -+STONITHd takes care of understanding the STONITH topology such that its -+clients simply request a node be fenced, and it does the rest. - - == Types of Pacemaker Clusters == - - Pacemaker makes no assumptions about your environment. This allows it - to support practically any - http://en.wikipedia.org/wiki/High-availability_cluster#Node_configurations[redundancy --configuration] including Active/Active, Active/Passive, N+1, N+M, --N-to-1 and N-to-N. -+configuration] including 'Active/Active', 'Active/Passive', 'N+1', -+'N+M', 'N-to-1' and 'N-to-N'. - - .Active/Passive Redundancy - image::images/pcmk-active-passive.png["Active/Passive Redundancy",width="10cm",height="7.5cm",align="center"] - --Two-node Active/Passive clusters using Pacemaker and DRBD are a --cost-effective solution for many High Availability situations. -+Two-node Active/Passive clusters using Pacemaker and 'DRBD' are -+a cost-effective solution for many High Availability situations. - - .Shared Failover - image::images/pcmk-shared-failover.png["Shared Failover",width="10cm",height="7.5cm",align="center"] - - By supporting many nodes, Pacemaker can dramatically reduce hardware - costs by allowing several active/passive clusters to be combined and --share a common backup node -+share a common backup node. - - .N to N Redundancy - image::images/pcmk-active-active.png["N to N Redundancy",width="10cm",height="7.5cm",align="center"] - --When shared storage is available, every node can potentially be used --for failover. Pacemaker can even run multiple copies of services to --spread out the workload. -+When shared storage is available, every node can potentially be used for -+failover. Pacemaker can even run multiple copies of services to spread -+out the workload. - -diff --git a/extra/resources/Dummy b/extra/resources/Dummy -index aec2a0c..8a38ef5 100644 ---- a/extra/resources/Dummy -+++ b/extra/resources/Dummy -@@ -137,7 +137,7 @@ dummy_stop() { - if [ $? = $OCF_SUCCESS ]; then - rm ${OCF_RESKEY_state} - fi -- rm ${VERIFY_SERIALIZED_FILE} -+ rm -f ${VERIFY_SERIALIZED_FILE} - return $OCF_SUCCESS - } - -diff --git a/extra/resources/ping b/extra/resources/ping -index e7b9973..ca9db75 100755 ---- a/extra/resources/ping -+++ b/extra/resources/ping -@@ -43,8 +43,7 @@ meta_data() { - 1.0 - - --Every time the monitor action is run, this resource agent records (in the CIB) the current number of ping nodes the host can connect to. --It is essentially the same as pingd except that it uses the system ping tool to obtain the results. -+Every time the monitor action is run, this resource agent records (in the CIB) the current number of nodes the host can connect to using the system fping (preferred) or ping tool. 
- </longdesc>
- <shortdesc lang="en">node connectivity</shortdesc>
- 
-diff --git a/fencing/README.md b/fencing/README.md
-new file mode 100644
-index 0000000..a50c69b
---- /dev/null
-+++ b/fencing/README.md
-@@ -0,0 +1,145 @@
-+# Directory contents
-+
-+* `admin.c`, `stonith_admin.8`: `stonith_admin` command-line tool and its man
-+  page
-+* `commands.c`, `internal.h`, `main.c`, `remote.c`, `stonithd.7`: stonithd and
-+  its man page
-+* `fence_dummy`, `fence_legacy`, `fence_legacy.8`, `fence_pcmk`,
-+  `fence_pcmk.8`: Pacemaker-supplied fence agents and their man pages
-+* `regression.py(.in)`: regression tests for `stonithd`
-+* `standalone_config.c`, `standalone_config.h`: abandoned project
-+* `test.c`: `stonith-test` command-line tool
-+
-+# How fencing requests are handled
-+
-+## Bird's eye view
-+
-+In the broadest terms, stonith works like this:
-+
-+1. The initiator (an external program such as `stonith_admin`, or the cluster
-+   itself via the `crmd`) asks the local `stonithd`, "Hey, can you fence this
-+   node?"
-+1. The local `stonithd` asks all the `stonithd's` in the cluster (including
-+   itself), "Hey, what fencing devices do you have access to that can fence
-+   this node?"
-+1. Each `stonithd` in the cluster replies with a list of available devices that
-+   it knows about.
-+1. Once the original `stonithd` gets all the replies, it asks the most
-+   appropriate `stonithd` peer to actually carry out the fencing. It may send
-+   out more than one such request if the target node must be fenced with
-+   multiple devices.
-+1. The chosen `stonithd(s)` call the appropriate fencing resource agent(s) to
-+   do the fencing, then reply to the original `stonithd` with the result.
-+1. The original `stonithd` broadcasts the result to all `stonithd's`.
-+1. Each `stonithd` sends the result to each of its local clients (including, at
-+   some point, the initiator).
-+
-+## Detailed view
-+
-+### Initiating a fencing request
-+
-+A fencing request can be initiated by the cluster or externally, using the
-+libfencing API.
-+
-+* The cluster always initiates fencing via `crmd/te_actions.c:te_fence_node()`
-+  (which calls the `fence()` API). This occurs when a graph synapse contains a
-+  `CRM_OP_FENCE` XML operation.
-+* The main external clients are `stonith_admin` and `stonith-test`.
-+
-+Highlights of the fencing API:
-+* `stonith_api_new()` creates and returns a new `stonith_t` object, whose
-+  `cmds` member has methods for connect, disconnect, fence, etc.
-+* the `fence()` method creates and sends a `STONITH_OP_FENCE` XML request with
-+  the desired action and target node. Callers do not have to choose or even
-+  have any knowledge about particular fencing devices.
-+
-+### Fencing queries
-+
-+The function calls for a stonith request go something like this as of this writing:
-+
-+The local `stonithd` receives the client's request via an IPC or messaging
-+layer callback, which calls
-+* `stonith_command()`, which (for requests) calls
-+  * `handle_request()`, which (for `STONITH_OP_FENCE` from a client) calls
-+    * `initiate_remote_stonith_op()`, which creates a `STONITH_OP_QUERY` XML
-+      request with the target, desired action, timeout, etc., then broadcasts
-+      the operation to the cluster group (i.e. all `stonithd` instances) and
-+      starts a timer. The query is broadcast because (1) location constraints
-+      might prevent the local node from accessing the stonith device directly,
-+      and (2) even if the local node does have direct access, another node
-+      might be preferred to carry out the fencing.
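-+
-+As an aside, the initiator's side of all this is small. The following is a
-+minimal, illustrative sketch of an external client using libfencing; it is
-+not code from this tree, and the exact method signatures may differ between
-+Pacemaker versions:
-+
-+```c
-+#include <crm/stonith-ng.h>
-+
-+int
-+main(void)
-+{
-+    stonith_t *st = stonith_api_new();
-+
-+    /* register with the local stonithd (no callback channel needed here) */
-+    st->cmds->connect(st, "example-client", NULL);
-+
-+    /* synchronously request that node3 be turned off, waiting up to 120s;
-+     * device and peer selection is entirely up to stonithd, as described
-+     * above */
-+    st->cmds->fence(st, st_opt_sync_call, "node3", "off", 120, 0);
-+
-+    st->cmds->disconnect(st);
-+    stonith_api_delete(st);
-+    return 0;
-+}
-+```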
-+
-+Each `stonithd` receives the original `stonithd`'s `STONITH_OP_QUERY`
-+broadcast request via IPC or messaging layer callback, which calls:
-+* `stonith_command()`, which (for requests) calls
-+  * `handle_request()`, which (for `STONITH_OP_QUERY` from a peer) calls
-+    * `stonith_query()`, which calls
-+      * `get_capable_devices()` with `stonith_query_capable_device_cb()` to add
-+        device information to an XML reply and send it. (A message is
-+        considered a reply if it contains `T_STONITH_REPLY`, which is only set
-+        by `stonithd` peers, not clients.)
-+
-+The original `stonithd` receives all peers' `STONITH_OP_QUERY` replies via IPC
-+or messaging layer callback, which calls:
-+* `stonith_command()`, which (for replies) calls
-+  * `handle_reply()`, which (for `STONITH_OP_QUERY`) calls
-+    * `process_remote_stonith_query()`, which allocates a new query result
-+      structure, parses device information into it, and adds it to the
-+      operation object. It increments the number of replies received for this
-+      operation, and compares it against the expected number of replies (i.e.
-+      the number of active peers), and if this is the last expected reply, calls
-+      * `call_remote_stonith()`, which calculates the timeout and sends
-+        `STONITH_OP_FENCE` request(s) to carry out the fencing. If the target
-+        node has a fencing "topology" (which allows specifications such as
-+        "this node can be fenced either with device A, or devices B and C in
-+        combination"), it will choose the device(s), and send out as many
-+        requests as needed. If it chooses a device, it will choose the peer; a
-+        peer is preferred if it has "verified" access to the desired device,
-+        meaning that it has the device "running" on it and thus has a monitor
-+        operation ensuring reachability.
-+
-+### Fencing operations
-+
-+Each `STONITH_OP_FENCE` request goes something like this as of this writing:
-+
-+The chosen peer `stonithd` receives the `STONITH_OP_FENCE` request via IPC or
-+messaging layer callback, which calls:
-+* `stonith_command()`, which (for requests) calls
-+  * `handle_request()`, which (for `STONITH_OP_FENCE` from a peer) calls
-+    * `stonith_fence()`, which calls
-+      * `schedule_stonith_command()` (using the supplied device if
-+        `F_STONITH_DEVICE` was set, otherwise the highest-priority capable
-+        device obtained via `get_capable_devices()` with
-+        `stonith_fence_get_devices_cb()`), which adds the operation to the
-+        device's pending operations list and triggers processing.
-+
-+The chosen peer `stonithd`'s mainloop is triggered and calls
-+* `stonith_device_dispatch()`, which calls
-+  * `stonith_device_execute()`, which pops off the next item from the device's
-+    pending operations list. If acting as the (internally implemented) watchdog
-+    agent, it panics the node, otherwise it calls
-+    * `stonith_action_create()` and `stonith_action_execute_async()` to call
-+      the fencing agent.
-+
-+The chosen peer `stonithd`'s mainloop is triggered again once the fencing
-+agent returns, and calls
-+* `stonith_action_async_done()`, which adds the results to an action object and
-+  then calls its
-+  * done callback (`st_child_done()`), which calls `schedule_stonith_command()`
-+    for a new device if there are further required actions to execute or if the
-+    original action failed, then builds and sends an XML reply to the original
-+    `stonithd` (via `stonith_send_async_reply()`), then checks whether any
-+    pending actions are the same as the one just executed and merges them if so.
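-+
-+To illustrate the topology semantics mentioned above: devices within a level
-+are "and-ed" and levels are "or-ed". The following is editorial pseudo-code,
-+not stonithd's actual implementation (`fence_with_device()` is a made-up
-+helper; `stonith_topology_t` and `ST_LEVEL_MAX` are real, see `internal.h`):
-+
-+```c
-+/* Try each configured level in order; the first level whose devices all
-+ * succeed fences the node. Levels start at 1; levels[0] is unused. */
-+static gboolean
-+fence_by_topology(stonith_topology_t *tp, const char *target)
-+{
-+    int lvl;
-+
-+    for (lvl = 1; lvl < ST_LEVEL_MAX; lvl++) {
-+        GListPtr d = tp->levels[lvl];
-+        gboolean level_ok = (d != NULL);   /* unconfigured levels are skipped */
-+
-+        for (; level_ok && (d != NULL); d = d->next) {
-+            /* hypothetical helper: run one device against the target */
-+            level_ok = fence_with_device(d->data, target);
-+        }
-+        if (level_ok) {
-+            return TRUE;    /* every device in this level succeeded */
-+        }
-+    }
-+    return FALSE;           /* all levels failed */
-+}
-+```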
-+
-+### Fencing replies
-+
-+The original `stonithd` receives the `STONITH_OP_FENCE` reply via IPC or
-+messaging layer callback, which calls:
-+* `stonith_command()`, which (for replies) calls
-+  * `handle_reply()`, which calls
-+    * `process_remote_stonith_exec()`, which calls either
-+      `call_remote_stonith()` (to retry a failed operation, or to try the next
-+      device in a topology if appropriate, which issues a new
-+      `STONITH_OP_FENCE` request, proceeding as before) or `remote_op_done()`
-+      (if the operation has definitively failed or succeeded).
-+    * `remote_op_done()` broadcasts the result to all peers.
-+
-+Finally, all peers receive the broadcast result and call
-+* `remote_op_done()`, which sends the result to all local clients.
-diff --git a/fencing/commands.c b/fencing/commands.c
-index c9975d3..0d2d614 100644
---- a/fencing/commands.c
-+++ b/fencing/commands.c
-@@ -53,15 +53,24 @@ GHashTable *topology = NULL;
- GList *cmd_list = NULL;
- 
- struct device_search_s {
-+    /* target of fence action */
-     char *host;
-+    /* requested fence action */
-     char *action;
-+    /* timeout to use if a device is queried dynamically for possible targets */
-     int per_device_timeout;
-+    /* number of registered fencing devices at time of request */
-     int replies_needed;
-+    /* number of device replies received so far */
-     int replies_received;
-+    /* whether the target is eligible to perform requested action (or off) */
-     bool allow_suicide;
- 
-+    /* private data to pass to search callback function */
-     void *user_data;
-+    /* function to call when all replies have been received */
-     void (*callback) (GList * devices, void *user_data);
-+    /* devices capable of performing requested action (or off if remapping) */
-     GListPtr capable;
- };
- 
-@@ -173,6 +182,17 @@ get_action_timeout(stonith_device_t * device, const char *action, int default_ti
-     char buffer[64] = { 0, };
-     const char *value = NULL;
- 
-+    /* If "reboot" was requested but the device does not support it,
-+     * we will remap to "off", so check timeout for "off" instead
-+     */
-+    if (safe_str_eq(action, "reboot")
-+        && is_not_set(device->flags, st_device_supports_reboot)) {
-+        crm_trace("%s doesn't support reboot, using timeout for off instead",
-+                  device->id);
-+        action = "off";
-+    }
-+
-+    /* If the device config specified an action-specific timeout, use it */
-     snprintf(buffer, sizeof(buffer) - 1, "pcmk_%s_timeout", action);
-     value = g_hash_table_lookup(device->params, buffer);
-     if (value) {
-@@ -1241,6 +1261,38 @@ search_devices_record_result(struct device_search_s *search, const char *device,
-     }
- }
- 
-+/*
-+ * \internal
-+ * \brief Check whether the local host is allowed to execute a fencing action
-+ *
-+ * \param[in] device         Fence device to check
-+ * \param[in] action         Fence action to check
-+ * \param[in] target         Hostname of fence target
-+ * \param[in] allow_suicide  Whether self-fencing is allowed for this operation
-+ *
-+ * \return TRUE if local host is allowed to execute action, FALSE otherwise
-+ */
-+static gboolean
-+localhost_is_eligible(const stonith_device_t *device, const char *action,
-+                      const char *target, gboolean allow_suicide)
-+{
-+    gboolean localhost_is_target = safe_str_eq(target, stonith_our_uname);
-+
-+    if (device && action && device->on_target_actions
-+        && strstr(device->on_target_actions, action)) {
-+        if (!localhost_is_target) {
-+            crm_trace("%s operation with %s can only be executed for localhost not %s",
-+                      action, device->id, target);
-+            return FALSE;
-+        }
-+
-+    } else if (localhost_is_target && !allow_suicide) {
-+        crm_trace("%s operation does not support self-fencing", action);
-+        return FALSE;
-+    }
-+    return TRUE;
-+}
-+
- static void
- can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *search)
- {
-@@ -1258,19 +1310,20 @@ can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *searc
-         goto search_report_results;
-     }
- 
--    if (dev->on_target_actions &&
--        search->action &&
--        strstr(dev->on_target_actions, search->action)) {
--        /* this device can only execute this action on the target node */
--
--        if(safe_str_neq(host, stonith_our_uname)) {
--            crm_trace("%s operation with %s can only be executed for localhost not %s",
--                      search->action, dev->id, host);
-+    /* Short-circuit query if this host is not allowed to perform the action */
-+    if (safe_str_eq(search->action, "reboot")) {
-+        /* A "reboot" *might* get remapped to "off" then "on", so short-circuit
-+         * only if all three are disallowed. If only one or two are disallowed,
-+         * we'll report that with the results. We never allow suicide for
-+         * remapped "on" operations because the host is off at that point.
-+         */
-+        if (!localhost_is_eligible(dev, "reboot", host, search->allow_suicide)
-+            && !localhost_is_eligible(dev, "off", host, search->allow_suicide)
-+            && !localhost_is_eligible(dev, "on", host, FALSE)) {
-             goto search_report_results;
-         }
--
--    } else if(safe_str_eq(host, stonith_our_uname) && search->allow_suicide == FALSE) {
--        crm_trace("%s operation does not support self-fencing", search->action);
-+    } else if (!localhost_is_eligible(dev, search->action, host,
-+                                      search->allow_suicide)) {
-         goto search_report_results;
-     }
- 
-@@ -1423,6 +1476,85 @@ struct st_query_data {
-     int call_options;
- };
- 
-+/*
-+ * \internal
-+ * \brief Add action-specific attributes to query reply XML
-+ *
-+ * \param[in,out] xml     XML to add attributes to
-+ * \param[in]     action  Fence action
-+ * \param[in]     device  Fence device
-+ */
-+static void
-+add_action_specific_attributes(xmlNode *xml, const char *action,
-+                               stonith_device_t *device)
-+{
-+    int action_specific_timeout;
-+    int delay_max;
-+
-+    CRM_CHECK(xml && action && device, return);
-+
-+    if (is_action_required(action, device)) {
-+        crm_trace("Action %s is required on %s", action, device->id);
-+        crm_xml_add_int(xml, F_STONITH_DEVICE_REQUIRED, 1);
-+    }
-+
-+    action_specific_timeout = get_action_timeout(device, action, 0);
-+    if (action_specific_timeout) {
-+        crm_trace("Action %s has timeout %dms on %s",
-+                  action, action_specific_timeout, device->id);
-+        crm_xml_add_int(xml, F_STONITH_ACTION_TIMEOUT, action_specific_timeout);
-+    }
-+
-+    delay_max = get_action_delay_max(device, action);
-+    if (delay_max > 0) {
-+        crm_trace("Action %s has maximum random delay %dms on %s",
-+                  action, delay_max, device->id);
-+        crm_xml_add_int(xml, F_STONITH_DELAY_MAX, delay_max / 1000);
-+    }
-+}
-+
-+/*
-+ * \internal
-+ * \brief Add "disallowed" attribute to query reply XML if appropriate
-+ *
-+ * \param[in,out] xml            XML to add attribute to
-+ * \param[in]     action         Fence action
-+ * \param[in]     device         Fence device
-+ * \param[in]     target         Fence target
-+ * \param[in]     allow_suicide  Whether self-fencing is allowed
-+ */
-+static void
-+add_disallowed(xmlNode *xml, const char *action, stonith_device_t *device,
-+               const char *target, gboolean allow_suicide)
-+{
-+    if (!localhost_is_eligible(device, action, target, allow_suicide)) {
-+        crm_trace("Action %s on %s is disallowed for local host",
-+                  action, device->id);
-+        crm_xml_add(xml, F_STONITH_ACTION_DISALLOWED, XML_BOOLEAN_TRUE);
-+    }
-+}
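-+
-+/* Taken together: for each device in a query reply,
-+ * add_action_specific_attributes() reports required/timeout/delay values,
-+ * add_disallowed() marks actions this node may not execute itself, and
-+ * add_action_reply() (below) wraps both in a per-action child element,
-+ * which the "reboot" remapping code uses to describe "off" and "on".
-+ */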
-+
-+/*
-+ * \internal
-+ * \brief Add child element with action-specific values to query reply XML
-+ *
-+ * \param[in,out] xml            XML to add child element to
-+ * \param[in]     action         Fence action
-+ * \param[in]     device         Fence device
-+ * \param[in]     target         Fence target
-+ * \param[in]     allow_suicide  Whether self-fencing is allowed
-+ */
-+static void
-+add_action_reply(xmlNode *xml, const char *action, stonith_device_t *device,
-+                 const char *target, gboolean allow_suicide)
-+{
-+    xmlNode *child = create_xml_node(xml, F_STONITH_ACTION);
-+
-+    crm_xml_add(child, XML_ATTR_ID, action);
-+    add_action_specific_attributes(child, action, device);
-+    add_disallowed(child, action, device, target, allow_suicide);
-+}
-+
- static void
- stonith_query_capable_device_cb(GList * devices, void *user_data)
- {
-@@ -1432,13 +1564,12 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
-     xmlNode *list = NULL;
-     GListPtr lpc = NULL;
- 
--    /* Pack the results into data */
-+    /* Pack the results into XML */
-     list = create_xml_node(NULL, __FUNCTION__);
-     crm_xml_add(list, F_STONITH_TARGET, query->target);
-     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
-         stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
--        int action_specific_timeout;
--        int delay_max;
-+        const char *action = query->action;
- 
-         if (!device) {
-             /* It is possible the device got unregistered while
-@@ -1448,24 +1579,44 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
- 
-         available_devices++;
- 
--        action_specific_timeout = get_action_timeout(device, query->action, 0);
-         dev = create_xml_node(list, F_STONITH_DEVICE);
-         crm_xml_add(dev, XML_ATTR_ID, device->id);
-         crm_xml_add(dev, "namespace", device->namespace);
-         crm_xml_add(dev, "agent", device->agent);
-         crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified);
--        if (is_action_required(query->action, device)) {
--            crm_xml_add_int(dev, F_STONITH_DEVICE_REQUIRED, 1);
--        }
--        if (action_specific_timeout) {
--            crm_xml_add_int(dev, F_STONITH_ACTION_TIMEOUT, action_specific_timeout);
-+
-+        /* If the originating stonithd wants to reboot the node, and we have a
-+         * capable device that doesn't support "reboot", remap to "off" instead.
-+         */
-+        if (is_not_set(device->flags, st_device_supports_reboot)
-+            && safe_str_eq(query->action, "reboot")) {
-+            crm_trace("%s doesn't support reboot, using values for off instead",
-+                      device->id);
-+            action = "off";
-         }
- 
--        delay_max = get_action_delay_max(device, query->action);
--        if (delay_max > 0) {
--            crm_xml_add_int(dev, F_STONITH_DELAY_MAX, delay_max / 1000);
-+        /* Add action-specific values if available */
-+        add_action_specific_attributes(dev, action, device);
-+        if (safe_str_eq(query->action, "reboot")) {
-+            /* A "reboot" *might* get remapped to "off" then "on", so after
-+             * sending the "reboot"-specific values in the main element, we add
-+             * sub-elements for "off" and "on" values.
-+             *
-+             * We short-circuited earlier if "reboot", "off" and "on" are all
-+             * disallowed for the local host. However if only one or two are
-+             * disallowed, we send back the results and mark which ones are
-+             * disallowed. If "reboot" is disallowed, this might cause problems
-+             * with older stonithd versions, which won't check for it. Older
-+             * versions will ignore "off" and "on", so they are not a problem.
-+             */
-+            add_disallowed(dev, action, device, query->target,
-+                           is_set(query->call_options, st_opt_allow_suicide));
-+            add_action_reply(dev, "off", device, query->target,
-+                             is_set(query->call_options, st_opt_allow_suicide));
-+            add_action_reply(dev, "on", device, query->target, FALSE);
-         }
- 
-+        /* A query without a target wants device parameters */
-         if (query->target == NULL) {
-             xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS);
- 
-@@ -1481,7 +1632,7 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
-     }
- 
-     if (list != NULL) {
--        crm_trace("Attaching query list output");
-+        crm_log_xml_trace(list, "Add query results");
-         add_message_xml(query->reply, F_STONITH_CALLDATA, list);
-     }
-     stonith_send_reply(query->reply, query->call_options, query->remote_peer, query->client_id);
-@@ -1766,6 +1917,14 @@ st_child_done(GPid pid, int rc, const char *output, gpointer user_data)
-             continue;
-         }
- 
-+        /* Duplicate merging will do the right thing for either type of remapped
-+         * reboot. If the executing stonithd remapped an unsupported reboot to
-+         * off, then cmd->action will be reboot and will be merged with any
-+         * other reboot requests. If the originating stonithd remapped a
-+         * topology reboot to off then on, we will get here once with
-+         * cmd->action "off" and once with "on", and they will be merged
-+         * separately with similar requests.
-+         */
-         crm_notice
-             ("Merging stonith action %s for node %s originating from client %s with identical stonith request from client %s",
-              cmd_other->action, cmd_other->victim, cmd_other->client_name, cmd->client_name);
-diff --git a/fencing/internal.h b/fencing/internal.h
-index 46bd3bf..5fb8f9c 100644
---- a/fencing/internal.h
-+++ b/fencing/internal.h
-@@ -51,6 +51,17 @@ typedef struct stonith_device_s {
-     gboolean api_registered;
- } stonith_device_t;
- 
-+/* These values are used to index certain arrays by "phase". Usually an
-+ * operation has only one "phase", so phase is always zero. However, some
-+ * reboots are remapped to "off" then "on", in which case "reboot" will be
-+ * phase 0, "off" will be phase 1 and "on" will be phase 2.
-+ */
-+enum st_remap_phase {
-+    st_phase_requested = 0,
-+    st_phase_off = 1,
-+    st_phase_on = 2
-+};
-+
- typedef struct remote_fencing_op_s {
-     /* The unique id associated with this operation */
-     char *id;
-@@ -97,7 +108,7 @@ typedef struct remote_fencing_op_s {
-     long long call_options;
- 
-     /*! The current state of the remote operation. This indicates
--     * what phase the op is in, query, exec, done, duplicate, failed. */
-+     * what stage the op is in: query, exec, done, duplicate, failed. */
-     enum op_state state;
-     /*! The node that owns the remote operation */
-     char *originator;
-@@ -114,10 +125,17 @@ typedef struct remote_fencing_op_s {
- 
-     /*! The current topology level being executed */
-     guint level;
--
--    /*! List of required devices the topology must execute regardless of what
--     * topology level they exist at. */
--    GListPtr required_list;
-+    /*! The current operation phase being executed */
-+    enum st_remap_phase phase;
-+
-+    /* For phase 0 or 1 (requested action or a remapped "off"), required devices
-+     * will be executed regardless of what topology level is being executed
-+     * currently. For phase 2 (remapped "on"), required devices will not be
-+     * attempted, because the cluster will execute them automatically when the
-+     * node next joins the cluster.
-+     */
-+    /*! Lists of devices marked as required for each phase */
-+    GListPtr required_list[3];
-     /*!
The device list of all the devices at the current executing topology level. */ - GListPtr devices_list; - /*! Current entry in the topology device list */ -@@ -129,6 +147,20 @@ typedef struct remote_fencing_op_s { - - } remote_fencing_op_t; - -+/* -+ * Complex fencing requirements are specified via fencing topologies. -+ * A topology consists of levels; each level is a list of fencing devices. -+ * Topologies are stored in a hash table by node name. When a node needs to be -+ * fenced, if it has an entry in the topology table, the levels are tried -+ * sequentially, and the devices in each level are tried sequentially. -+ * Fencing is considered successful as soon as any level succeeds; -+ * a level is considered successful if all its devices succeed. -+ * Essentially, all devices at a given level are "and-ed" and the -+ * levels are "or-ed". -+ * -+ * This structure is used for the topology table entries. -+ * Topology levels start from 1, so levels[0] is unused and always NULL. -+ */ - typedef struct stonith_topology_s { - char *node; - GListPtr levels[ST_LEVEL_MAX]; -diff --git a/fencing/main.c b/fencing/main.c -index a499175..46d7352 100644 ---- a/fencing/main.c -+++ b/fencing/main.c -@@ -1234,7 +1234,7 @@ struct qb_ipcs_service_handlers ipc_callbacks = { - static void - st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data) - { -- if (type == crm_status_uname) { -+ if (type != crm_status_processes) { - /* - * This is a hack until we can send to a nodeid and/or we fix node name lookups - * These messages are ignored in stonith_peer_callback() -diff --git a/fencing/regression.py.in b/fencing/regression.py.in -index fe6d418..b4e6f08 100644 ---- a/fencing/regression.py.in -+++ b/fencing/regression.py.in -@@ -23,861 +23,937 @@ import shlex - import time - - def output_from_command(command): -- test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE) -- test.wait() -+ test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE) -+ test.wait() - -- return test.communicate()[0].split("\n") -+ return test.communicate()[0].split("\n") - - class Test: -- def __init__(self, name, description, verbose = 0, with_cpg = 0): -- self.name = name -- self.description = description -- self.cmds = [] -- self.verbose = verbose -+ def __init__(self, name, description, verbose = 0, with_cpg = 0): -+ self.name = name -+ self.description = description -+ self.cmds = [] -+ self.verbose = verbose - -- self.result_txt = "" -- self.cmd_tool_output = "" -- self.result_exitcode = 0; -+ self.result_txt = "" -+ self.cmd_tool_output = "" -+ self.result_exitcode = 0; - -- self.stonith_options = "-s" -- self.enable_corosync = 0 -+ self.stonith_options = "-s" -+ self.enable_corosync = 0 - -- if with_cpg: -- self.stonith_options = "-c" -- self.enable_corosync = 1 -+ if with_cpg: -+ self.stonith_options = "-c" -+ self.enable_corosync = 1 - -- self.stonith_process = None -- self.stonith_output = "" -- self.stonith_patterns = [] -- self.negative_stonith_patterns = [] -+ self.stonith_process = None -+ self.stonith_output = "" -+ self.stonith_patterns = [] -+ self.negative_stonith_patterns = [] - -- self.executed = 0 -+ self.executed = 0 - -- rsc_classes = output_from_command("crm_resource --list-standards") -+ rsc_classes = output_from_command("crm_resource --list-standards") - -- def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None): -- self.cmds.append( -- { -- "cmd" 
: cmd, -- "kill" : kill, -- "args" : args, -- "expected_exitcode" : exitcode, -- "stdout_match" : stdout_match, -- "stdout_negative_match" : stdout_negative_match, -- "no_wait" : no_wait, -- } -- ) -+ def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None): -+ self.cmds.append( -+ { -+ "cmd" : cmd, -+ "kill" : kill, -+ "args" : args, -+ "expected_exitcode" : exitcode, -+ "stdout_match" : stdout_match, -+ "stdout_negative_match" : stdout_negative_match, -+ "no_wait" : no_wait, -+ } -+ ) - -- def stop_pacemaker(self): -- cmd = shlex.split("killall -9 -q pacemakerd") -- test = subprocess.Popen(cmd, stdout=subprocess.PIPE) -- test.wait() -+ def stop_pacemaker(self): -+ cmd = shlex.split("killall -9 -q pacemakerd") -+ test = subprocess.Popen(cmd, stdout=subprocess.PIPE) -+ test.wait() - -- def start_environment(self): -- ### make sure we are in full control here ### -- self.stop_pacemaker() -+ def start_environment(self): -+ ### make sure we are in full control here ### -+ self.stop_pacemaker() - -- cmd = shlex.split("killall -9 -q stonithd") -- test = subprocess.Popen(cmd, stdout=subprocess.PIPE) -- test.wait() -+ cmd = shlex.split("killall -9 -q stonithd") -+ test = subprocess.Popen(cmd, stdout=subprocess.PIPE) -+ test.wait() - -- if self.verbose: -- self.stonith_options = self.stonith_options + " -V" -- print "Starting stonithd with %s" % self.stonith_options -+ if self.verbose: -+ self.stonith_options = self.stonith_options + " -V" -+ print "Starting stonithd with %s" % self.stonith_options - -- if os.path.exists("/tmp/stonith-regression.log"): -- os.remove('/tmp/stonith-regression.log') -+ if os.path.exists("/tmp/stonith-regression.log"): -+ os.remove('/tmp/stonith-regression.log') - -- self.stonith_process = subprocess.Popen( -- shlex.split("@CRM_DAEMON_DIR@/stonithd %s -l /tmp/stonith-regression.log" % self.stonith_options)) -+ self.stonith_process = subprocess.Popen( -+ shlex.split("@CRM_DAEMON_DIR@/stonithd %s -l /tmp/stonith-regression.log" % self.stonith_options)) - -- time.sleep(1) -- -- def clean_environment(self): -- if self.stonith_process: -- self.stonith_process.terminate() -- self.stonith_process.wait() -- -- self.stonith_output = "" -- self.stonith_process = None -- -- f = open('/tmp/stonith-regression.log', 'r') -- for line in f.readlines(): -- self.stonith_output = self.stonith_output + line -- -- if self.verbose: -- print "Daemon Output Start" -- print self.stonith_output -- print "Daemon Output End" -- os.remove('/tmp/stonith-regression.log') -- -- def add_stonith_log_pattern(self, pattern): -- self.stonith_patterns.append(pattern) -- -- def add_stonith_negative_log_pattern(self, pattern): -- self.negative_stonith_patterns.append(pattern) -- -- def add_cmd(self, cmd, args): -- self.__new_cmd(cmd, args, 0, "") -- -- def add_cmd_no_wait(self, cmd, args): -- self.__new_cmd(cmd, args, 0, "", 1) -- -- def add_cmd_check_stdout(self, cmd, args, match, no_match = ""): -- self.__new_cmd(cmd, args, 0, match, 0, no_match) -- -- def add_expected_fail_cmd(self, cmd, args, exitcode = 255): -- self.__new_cmd(cmd, args, exitcode, "") -- -- def get_exitcode(self): -- return self.result_exitcode -- -- def print_result(self, filler): -- print "%s%s" % (filler, self.result_txt) -- -- def run_cmd(self, args): -- cmd = shlex.split(args['args']) -- cmd.insert(0, args['cmd']) -- -- if self.verbose: -- print "\n\nRunning: "+" ".join(cmd) -- test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) -- -- if args['kill']: -- 
if self.verbose: -- print "Also running: "+args['kill'] -- subprocess.Popen(shlex.split(args['kill'])) -- -- if args['no_wait'] == 0: -- test.wait() -- else: -- return 0 -- -- output_res = test.communicate() -- output = output_res[0] + output_res[1] -- -- if self.verbose: -- print output -- -- if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: -- test.returncode = -2 -- print "STDOUT string '%s' was not found in cmd output: %s" % (args['stdout_match'], output) -- -- if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: -- test.returncode = -2 -- print "STDOUT string '%s' was found in cmd output: %s" % (args['stdout_negative_match'], output) -- -- return test.returncode; -- -- -- def count_negative_matches(self, outline): -- count = 0 -- for line in self.negative_stonith_patterns: -- if outline.count(line): -- count = 1 -- if self.verbose: -- print "This pattern should not have matched = '%s" % (line) -- return count -- -- def match_stonith_patterns(self): -- negative_matches = 0 -- cur = 0 -- pats = self.stonith_patterns -- total_patterns = len(self.stonith_patterns) -- -- if len(self.stonith_patterns) == 0: -- return -- -- for line in self.stonith_output.split("\n"): -- negative_matches = negative_matches + self.count_negative_matches(line) -- if len(pats) == 0: -- continue -- cur = -1 -- for p in pats: -- cur = cur + 1 -- if line.count(pats[cur]): -- del pats[cur] -- break -- -- if len(pats) > 0 or negative_matches: -- if self.verbose: -- for p in pats: -- print "Pattern Not Matched = '%s'" % p -- -- self.result_txt = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." % (self.name, len(pats), total_patterns, negative_matches) -- self.result_exitcode = -1 -- -- def run(self): -- res = 0 -- i = 1 -- self.start_environment() -- -- if self.verbose: -- print "\n--- START TEST - %s" % self.name -- -- self.result_txt = "SUCCESS - '%s'" % (self.name) -- self.result_exitcode = 0 -- for cmd in self.cmds: -- res = self.run_cmd(cmd) -- if res != cmd['expected_exitcode']: -- print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode']) -- self.result_txt = "FAILURE - '%s' failed at step %d. 
Command: %s %s" % (self.name, i, cmd['cmd'], cmd['args']) -- self.result_exitcode = -1 -- break -- else: -- if self.verbose: -- print "Step %d SUCCESS" % (i) -- i = i + 1 -- self.clean_environment() -- -- if self.result_exitcode == 0: -- self.match_stonith_patterns() -- -- print self.result_txt -- if self.verbose: -- print "--- END TEST - %s\n" % self.name -- -- self.executed = 1 -- return res -+ time.sleep(1) -+ -+ def clean_environment(self): -+ if self.stonith_process: -+ self.stonith_process.terminate() -+ self.stonith_process.wait() -+ -+ self.stonith_output = "" -+ self.stonith_process = None -+ -+ f = open('/tmp/stonith-regression.log', 'r') -+ for line in f.readlines(): -+ self.stonith_output = self.stonith_output + line -+ -+ if self.verbose: -+ print "Daemon Output Start" -+ print self.stonith_output -+ print "Daemon Output End" -+ os.remove('/tmp/stonith-regression.log') -+ -+ def add_stonith_log_pattern(self, pattern): -+ self.stonith_patterns.append(pattern) -+ -+ def add_stonith_negative_log_pattern(self, pattern): -+ self.negative_stonith_patterns.append(pattern) -+ -+ def add_cmd(self, cmd, args): -+ self.__new_cmd(cmd, args, 0, "") -+ -+ def add_cmd_no_wait(self, cmd, args): -+ self.__new_cmd(cmd, args, 0, "", 1) -+ -+ def add_cmd_check_stdout(self, cmd, args, match, no_match = ""): -+ self.__new_cmd(cmd, args, 0, match, 0, no_match) -+ -+ def add_expected_fail_cmd(self, cmd, args, exitcode = 255): -+ self.__new_cmd(cmd, args, exitcode, "") -+ -+ def get_exitcode(self): -+ return self.result_exitcode -+ -+ def print_result(self, filler): -+ print "%s%s" % (filler, self.result_txt) -+ -+ def run_cmd(self, args): -+ cmd = shlex.split(args['args']) -+ cmd.insert(0, args['cmd']) -+ -+ if self.verbose: -+ print "\n\nRunning: "+" ".join(cmd) -+ test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) -+ -+ if args['kill']: -+ if self.verbose: -+ print "Also running: "+args['kill'] -+ subprocess.Popen(shlex.split(args['kill'])) -+ -+ if args['no_wait'] == 0: -+ test.wait() -+ else: -+ return 0 -+ -+ output_res = test.communicate() -+ output = output_res[0] + output_res[1] -+ -+ if self.verbose: -+ print output -+ -+ if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: -+ test.returncode = -2 -+ print "STDOUT string '%s' was not found in cmd output: %s" % (args['stdout_match'], output) -+ -+ if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: -+ test.returncode = -2 -+ print "STDOUT string '%s' was found in cmd output: %s" % (args['stdout_negative_match'], output) -+ -+ return test.returncode; -+ -+ -+ def count_negative_matches(self, outline): -+ count = 0 -+ for line in self.negative_stonith_patterns: -+ if outline.count(line): -+ count = 1 -+ if self.verbose: -+ print "This pattern should not have matched = '%s" % (line) -+ return count -+ -+ def match_stonith_patterns(self): -+ negative_matches = 0 -+ cur = 0 -+ pats = self.stonith_patterns -+ total_patterns = len(self.stonith_patterns) -+ -+ if len(self.stonith_patterns) == 0: -+ return -+ -+ for line in self.stonith_output.split("\n"): -+ negative_matches = negative_matches + self.count_negative_matches(line) -+ if len(pats) == 0: -+ continue -+ cur = -1 -+ for p in pats: -+ cur = cur + 1 -+ if line.count(pats[cur]): -+ del pats[cur] -+ break -+ -+ if len(pats) > 0 or negative_matches: -+ if self.verbose: -+ for p in pats: -+ print "Pattern Not Matched = '%s'" % p -+ -+ self.result_txt = "FAILURE - '%s' failed. 
%d patterns out of %d not matched. %d negative matches." % (self.name, len(pats), total_patterns, negative_matches) -+ self.result_exitcode = -1 -+ -+ def run(self): -+ res = 0 -+ i = 1 -+ self.start_environment() -+ -+ if self.verbose: -+ print "\n--- START TEST - %s" % self.name -+ -+ self.result_txt = "SUCCESS - '%s'" % (self.name) -+ self.result_exitcode = 0 -+ for cmd in self.cmds: -+ res = self.run_cmd(cmd) -+ if res != cmd['expected_exitcode']: -+ print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode']) -+ self.result_txt = "FAILURE - '%s' failed at step %d. Command: %s %s" % (self.name, i, cmd['cmd'], cmd['args']) -+ self.result_exitcode = -1 -+ break -+ else: -+ if self.verbose: -+ print "Step %d SUCCESS" % (i) -+ i = i + 1 -+ self.clean_environment() -+ -+ if self.result_exitcode == 0: -+ self.match_stonith_patterns() -+ -+ print self.result_txt -+ if self.verbose: -+ print "--- END TEST - %s\n" % self.name -+ -+ self.executed = 1 -+ return res - - class Tests: -- def __init__(self, verbose = 0): -- self.tests = [] -- self.verbose = verbose -- self.autogen_corosync_cfg = 0 -- if not os.path.exists("/etc/corosync/corosync.conf"): -- self.autogen_corosync_cfg = 1 -- -- def new_test(self, name, description, with_cpg = 0): -- test = Test(name, description, self.verbose, with_cpg) -- self.tests.append(test) -- return test -- -- def print_list(self): -- print "\n==== %d TESTS FOUND ====" % (len(self.tests)) -- print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION") -- print "%35s - %s" % ("--------------------", "--------------------") -- for test in self.tests: -- print "%35s - %s" % (test.name, test.description) -- print "==== END OF LIST ====\n" -- -- -- def start_corosync(self): -- if self.verbose: -- print "Starting corosync" -- -- test = subprocess.Popen("corosync", stdout=subprocess.PIPE) -- test.wait() -- time.sleep(10) -- -- def stop_corosync(self): -- cmd = shlex.split("killall -9 -q corosync") -- test = subprocess.Popen(cmd, stdout=subprocess.PIPE) -- test.wait() -- -- def run_single(self, name): -- for test in self.tests: -- if test.name == name: -- test.run() -- break; -- -- def run_tests_matching(self, pattern): -- for test in self.tests: -- if test.name.count(pattern) != 0: -- test.run() -- -- def run_cpg_only(self): -- for test in self.tests: -- if test.enable_corosync: -- test.run() -- -- def run_no_cpg(self): -- for test in self.tests: -- if not test.enable_corosync: -- test.run() -- -- def run_tests(self): -- for test in self.tests: -- test.run() -- -- def exit(self): -- for test in self.tests: -- if test.executed == 0: -- continue -- -- if test.get_exitcode() != 0: -- sys.exit(-1) -- -- sys.exit(0) -- -- def print_results(self): -- failures = 0; -- success = 0; -- print "\n\n======= FINAL RESULTS ==========" -- print "\n--- FAILURE RESULTS:" -- for test in self.tests: -- if test.executed == 0: -- continue -- -- if test.get_exitcode() != 0: -- failures = failures + 1 -- test.print_result(" ") -- else: -- success = success + 1 -- -- if failures == 0: -- print " None" -- -- print "\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures) -- def build_api_sanity_tests(self): -- verbose_arg = "" -- if self.verbose: -- verbose_arg = "-V" -- -- test = self.new_test("standalone_low_level_api_test", "Sanity test client api in standalone mode.") -- test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-t %s" % (verbose_arg)) -- -- test = self.new_test("cpg_low_level_api_test", "Sanity test client api using mainloop and cpg.", 1) -- 
test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-m %s" % (verbose_arg)) -- -- def build_custom_timeout_tests(self): -- # custom timeout without topology -- test = self.new_test("cpg_custom_timeout_1", -- "Verify per device timeouts work as expected without using topology.", 1) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4\"") -- test.add_cmd("stonith_admin", "-F node3 -t 2") -- # timeout is 2+1+4 = 7 -- test.add_stonith_log_pattern("remote op timeout set to 7") -- -- # custom timeout _WITH_ topology -- test = self.new_test("cpg_custom_timeout_2", -- "Verify per device timeouts work as expected _WITH_ topology.", 1) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4000\"") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") -- test.add_cmd("stonith_admin", "-r node3 -i 3 -v false2") -- test.add_cmd("stonith_admin", "-F node3 -t 2") -- # timeout is 2+1+4000 = 4003 -- test.add_stonith_log_pattern("remote op timeout set to 4003") -- -- def build_fence_merge_tests(self): -- -- ### Simple test that overlapping fencing operations get merged -- test = self.new_test("cpg_custom_merge_single", -- "Verify overlapping identical fencing operations are merged, no fencing levels used.", 1) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd("stonith_admin", "-F node3 -t 10") -- ### one merger will happen -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- ### the pattern below signifies that both the original and duplicate operation completed -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- -- ### Test that multiple mergers occur -- test = self.new_test("cpg_custom_merge_multiple", -- "Verify multiple overlapping identical fencing operations are merged", 1) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd("stonith_admin", "-F node3 -t 10") -- ### 4 mergers should occur -- test.add_stonith_log_pattern("Merging stonith action 
off for node node3 originating from client") -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- ### the pattern below signifies that both the original and duplicate operation completed -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- -- ### Test that multiple mergers occur with topologies used -- test = self.new_test("cpg_custom_merge_with_topology", -- "Verify multiple overlapping identical fencing operations are merged with fencing levels.", 1) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -- test.add_cmd("stonith_admin", "-F node3 -t 10") -- ### 4 mergers should occur -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -- ### the pattern below signifies that both the original and duplicate operation completed -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- test.add_stonith_log_pattern("Operation off of node3 by") -- -- -- test = self.new_test("cpg_custom_no_merge", -- "Verify differing fencing operations are not merged", 1) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3 node2\" ") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") -- test.add_cmd_no_wait("stonith_admin", "-F node2 -t 10") -- test.add_cmd("stonith_admin", "-F node3 -t 10") -- test.add_stonith_negative_log_pattern("Merging stonith action off for node node3 originating from client") -- -- def build_standalone_tests(self): -- test_types = [ -- { -- "prefix" : "standalone" , -- "use_cpg" : 0, -- }, -- { -- 
"prefix" : "cpg" , -- "use_cpg" : 1, -- }, -- ] -- -- # test what happens when all devices timeout -- for test_type in test_types: -- test = self.new_test("%s_fence_multi_device_failure" % test_type["prefix"], -- "Verify that all devices timeout, a fencing failure is returned.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- if test_type["use_cpg"] == 1: -- test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", 194) -- test.add_stonith_log_pattern("remote op timeout set to 6") -- else: -- test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", 55) -- -- test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: ") -- test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: ") -- test.add_stonith_log_pattern("for host 'node3' with device 'false3' returned: ") -- -- # test what happens when multiple devices can fence a node, but the first device fails. -- for test_type in test_types: -- test = self.new_test("%s_fence_device_failure_rollover" % test_type["prefix"], -- "Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-F node3 -t 2") -- -- if test_type["use_cpg"] == 1: -- test.add_stonith_log_pattern("remote op timeout set to 6") -- -- # simple topology test for one device -- for test_type in test_types: -- if test_type["use_cpg"] == 0: -- continue -- -- test = self.new_test("%s_topology_simple" % test_type["prefix"], -- "Verify all fencing devices at a level are used.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") -- test.add_cmd("stonith_admin", "-F node3 -t 2") -- -- test.add_stonith_log_pattern("remote op timeout set to 2") -- test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") -- -- -- # add topology, delete topology, verify fencing still works -- for test_type in test_types: -- if test_type["use_cpg"] == 0: -- continue -- -- test = self.new_test("%s_topology_add_remove" % test_type["prefix"], -- "Verify fencing occurrs after all topology levels are removed", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") -- test.add_cmd("stonith_admin", "-d node3 -i 1") -- test.add_cmd("stonith_admin", "-F node3 -t 2") -- -- test.add_stonith_log_pattern("remote op timeout set to 2") -- test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") -- -- # test what happens when the first fencing level has multiple devices. 
-- for test_type in test_types: -- if test_type["use_cpg"] == 0: -- continue -- -- test = self.new_test("%s_topology_device_fails" % test_type["prefix"], -- "Verify if one device in a level fails, the other is tried.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R false -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v true") -- test.add_cmd("stonith_admin", "-F node3 -t 20") -- -- test.add_stonith_log_pattern("remote op timeout set to 40") -- test.add_stonith_log_pattern("for host 'node3' with device 'false' returned: -201") -- test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") -- -- # test what happens when the first fencing level fails. -- for test_type in test_types: -- if test_type["use_cpg"] == 0: -- continue -- -- test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"], -- "Verify if one level fails, the next leve is tried.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") -- test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") -- test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") -- -- test.add_cmd("stonith_admin", "-F node3 -t 2") -- -- test.add_stonith_log_pattern("remote op timeout set to 12") -- test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -201") -- test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -201") -- test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") -- test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") -- -- -- # test what happens when the first fencing level had devices that no one has registered -- for test_type in test_types: -- if test_type["use_cpg"] == 0: -- continue -- -- test = self.new_test("%s_topology_missing_devices" % test_type["prefix"], -- "Verify topology can continue with missing devices.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- -- test.add_cmd("stonith_admin", "-r node3 
-i 1 -v false1") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") -- test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") -- test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") -- -- test.add_cmd("stonith_admin", "-F node3 -t 2") -- -- # Test what happens if multiple fencing levels are defined, and then the first one is removed. -- for test_type in test_types: -- if test_type["use_cpg"] == 0: -- continue -- -- test = self.new_test("%s_topology_level_removal" % test_type["prefix"], -- "Verify level removal works.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -- -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -- test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") -- -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") -- test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") -- -- test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") -- test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") -- -- # Now remove level 2, verify none of the devices in level two are hit. -- test.add_cmd("stonith_admin", "-d node3 -i 2") -- -- test.add_cmd("stonith_admin", "-F node3 -t 20") -- -- test.add_stonith_log_pattern("remote op timeout set to 8") -- test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -201") -- test.add_stonith_negative_log_pattern("for host 'node3' with device 'false2' returned: ") -- test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") -- test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") -- -- # test the stonith builds the correct list of devices that can fence a node. 
-- for test_type in test_types: -- test = self.new_test("%s_list_devices" % test_type["prefix"], -- "Verify list of devices that can fence a node is correct", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- -- test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true2", "true1") -- test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true3", "true1") -- -- # simple test of device monitor -- for test_type in test_types: -- test = self.new_test("%s_monitor" % test_type["prefix"], -- "Verify device is reachable", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -- -- test.add_cmd("stonith_admin", "-Q true1") -- test.add_cmd("stonith_admin", "-Q false1") -- test.add_expected_fail_cmd("stonith_admin", "-Q true2", 237) -- -- # Verify monitor occurs for duration of timeout period on failure -- for test_type in test_types: -- test = self.new_test("%s_monitor_timeout" % test_type["prefix"], -- "Verify monitor uses duration of timeout period given.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_monitor_fail -o \"pcmk_host_list=node3\"") -- test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 5", 195) -- test.add_stonith_log_pattern("Attempt 2 to execute") -- -- # Verify monitor occurs for duration of timeout period on failure, but stops at max retries -- for test_type in test_types: -- test = self.new_test("%s_monitor_timeout_max_retries" % test_type["prefix"], -- "Verify monitor retries until max retry value or timeout is hit.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_monitor_fail -o \"pcmk_host_list=node3\"") -- test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 15",195) -- test.add_stonith_log_pattern("Attempted to execute agent fence_dummy_monitor_fail (list) the maximum number of times") -- -- # simple register test -- for test_type in test_types: -- test = self.new_test("%s_register" % test_type["prefix"], -- "Verify devices can be registered and un-registered", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -- -- test.add_cmd("stonith_admin", "-Q true1") -- -- test.add_cmd("stonith_admin", "-D true1") -- -- test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237) -- -- -- # simple reboot test -- for test_type in test_types: -- test = self.new_test("%s_reboot" % test_type["prefix"], -- "Verify devices can be rebooted", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -- -- test.add_cmd("stonith_admin", "-B node3 -t 2") -- -- test.add_cmd("stonith_admin", "-D true1") -- -- test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237) -- -- # test fencing history. 
-- for test_type in test_types: -- if test_type["use_cpg"] == 0: -- continue -- test = self.new_test("%s_fence_history" % test_type["prefix"], -- "Verify last fencing operation is returned.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -- -- test.add_cmd("stonith_admin", "-F node3 -t 2 -V") -- -- test.add_cmd_check_stdout("stonith_admin", "-H node3", "was able to turn off node node3", "") -- -- # simple test of dynamic list query -- for test_type in test_types: -- test = self.new_test("%s_dynamic_list_query" % test_type["prefix"], -- "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_list") -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_list") -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy_list") -- -- test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found") -- -- -- # fence using dynamic list query -- for test_type in test_types: -- test = self.new_test("%s_fence_dynamic_list_query" % test_type["prefix"], -- "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_list") -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_list") -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy_list") -- -- test.add_cmd("stonith_admin", "-F fake_port_1 -t 5 -V"); -- -- # simple test of query using status action -- for test_type in test_types: -- test = self.new_test("%s_status_query" % test_type["prefix"], -- "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") -- -- test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found") -- -- # test what happens when no reboot action is advertised -- for test_type in test_types: -- test = self.new_test("%s_no_reboot_support" % test_type["prefix"], -- "Verify reboot action defaults to off when no reboot action is advertised by agent.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_no_reboot -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-B node1 -t 5 -V"); -- test.add_stonith_log_pattern("does not advertise support for 'reboot', performing 'off'") -- test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -- -- # make sure reboot is used when reboot action is advertised -- for test_type in test_types: -- test = self.new_test("%s_with_reboot_support" % test_type["prefix"], -- "Verify reboot action can be used when metadata advertises it.", test_type["use_cpg"]) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -- test.add_cmd("stonith_admin", "-B node1 -t 5 -V"); -- test.add_stonith_negative_log_pattern("does not advertise support for 'reboot', performing 'off'") -- test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -- -- def build_nodeid_tests(self): -- our_uname = output_from_command("uname -n") -- if our_uname: -- our_uname = our_uname[0] -- -- ### verify nodeid is supplied when nodeid is in the metadata parameters -- 
test = self.new_test("cpg_supply_nodeid", -- "Verify nodeid is given when fence agent has nodeid as parameter", 1) -- -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) -- test.add_stonith_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) -- -- ### verify nodeid is _NOT_ supplied when nodeid is not in the metadata parameters -- test = self.new_test("cpg_do_not_supply_nodeid", -- "Verify nodeid is _NOT_ given when fence agent does not have nodeid as parameter", 1) -- -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) -- test.add_stonith_negative_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) -- -- ### verify nodeid use doesn't explode standalone mode -- test = self.new_test("standalone_do_not_supply_nodeid", -- "Verify nodeid in metadata parameter list doesn't kill standalone mode", 0) -- -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) -- test.add_stonith_negative_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) -- -- -- def build_unfence_tests(self): -- our_uname = output_from_command("uname -n") -- if our_uname: -- our_uname = our_uname[0] -- -- ### verify unfencing using automatic unfencing -- test = self.new_test("cpg_unfence_required_1", -- "Verify require unfencing on all devices when automatic=true in agent's metadata", 1) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -- # both devices should be executed -- test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -- test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)"); -- -- -- ### verify unfencing using automatic unfencing fails if any of the required agents fail -- test = self.new_test("cpg_unfence_required_2", -- "Verify require unfencing on all devices when automatic=true in agent's metadata", 1) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=fail\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_expected_fail_cmd("stonith_admin", "-U %s -t 6" % (our_uname), 143) -- -- ### verify unfencing using automatic devices with topology -- test = self.new_test("cpg_unfence_required_3", -- "Verify require unfencing on all devices even when required devices are at different topology levels", 1) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) -- test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -- 
test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -- test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)"); -- -- -- ### verify unfencing using automatic devices with topology -- test = self.new_test("cpg_unfence_required_4", -- "Verify all required devices are executed even with topology levels fail.", 1) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true3 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true4 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R false4 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 1 -v false1" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 2 -v false2" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 2 -v false3" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 2 -v true3" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 3 -v false4" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 4 -v true4" % (our_uname)) -- test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -- test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -- test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)"); -- test.add_stonith_log_pattern("with device 'true3' returned: 0 (OK)"); -- test.add_stonith_log_pattern("with device 'true4' returned: 0 (OK)"); -- -- ### verify unfencing using on_target device -- test = self.new_test("cpg_unfence_on_target_1", -- "Verify unfencing with on_target = true", 1) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -- test.add_stonith_log_pattern("(on) to be executed on the target node") -- -- -- ### verify failure of unfencing using on_target device -- test = self.new_test("cpg_unfence_on_target_2", -- "Verify failure unfencing with on_target = true", 1) -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake_1234\"" % (our_uname)) -- test.add_expected_fail_cmd("stonith_admin", "-U node_fake_1234 -t 3", 237) -- test.add_stonith_log_pattern("(on) to be executed on the target node") -- -- -- ### verify unfencing using on_target device with topology -- test = self.new_test("cpg_unfence_on_target_3", -- "Verify unfencing with on_target = true using topology", 1) -- -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o 
\"pcmk_host_list=%s node3\"" % (our_uname)) -- -- test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) -- test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) -- -- test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -- test.add_stonith_log_pattern("(on) to be executed on the target node") -- -- ### verify unfencing using on_target device with topology fails when victim node doesn't exist -- test = self.new_test("cpg_unfence_on_target_4", -- "Verify unfencing failure with on_target = true using topology", 1) -- -- test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) -- test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) -- -- test.add_cmd("stonith_admin", "-r node_fake -i 1 -v true1") -- test.add_cmd("stonith_admin", "-r node_fake -i 2 -v true2") -- -- test.add_expected_fail_cmd("stonith_admin", "-U node_fake -t 3", 237) -- test.add_stonith_log_pattern("(on) to be executed on the target node") -- -- -- def setup_environment(self, use_corosync): -- if self.autogen_corosync_cfg and use_corosync: -- corosync_conf = (""" -+ def __init__(self, verbose = 0): -+ self.tests = [] -+ self.verbose = verbose -+ self.autogen_corosync_cfg = 0 -+ if not os.path.exists("/etc/corosync/corosync.conf"): -+ self.autogen_corosync_cfg = 1 -+ -+ def new_test(self, name, description, with_cpg = 0): -+ test = Test(name, description, self.verbose, with_cpg) -+ self.tests.append(test) -+ return test -+ -+ def print_list(self): -+ print "\n==== %d TESTS FOUND ====" % (len(self.tests)) -+ print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION") -+ print "%35s - %s" % ("--------------------", "--------------------") -+ for test in self.tests: -+ print "%35s - %s" % (test.name, test.description) -+ print "==== END OF LIST ====\n" -+ -+ -+ def start_corosync(self): -+ if self.verbose: -+ print "Starting corosync" -+ -+ test = subprocess.Popen("corosync", stdout=subprocess.PIPE) -+ test.wait() -+ time.sleep(10) -+ -+ def stop_corosync(self): -+ cmd = shlex.split("killall -9 -q corosync") -+ test = subprocess.Popen(cmd, stdout=subprocess.PIPE) -+ test.wait() -+ -+ def run_single(self, name): -+ for test in self.tests: -+ if test.name == name: -+ test.run() -+ break; -+ -+ def run_tests_matching(self, pattern): -+ for test in self.tests: -+ if test.name.count(pattern) != 0: -+ test.run() -+ -+ def run_cpg_only(self): -+ for test in self.tests: -+ if test.enable_corosync: -+ test.run() -+ -+ def run_no_cpg(self): -+ for test in self.tests: -+ if not test.enable_corosync: -+ test.run() -+ -+ def run_tests(self): -+ for test in self.tests: -+ test.run() -+ -+ def exit(self): -+ for test in self.tests: -+ if test.executed == 0: -+ continue -+ -+ if test.get_exitcode() != 0: -+ sys.exit(-1) -+ -+ sys.exit(0) -+ -+ def print_results(self): -+ failures = 0; -+ success = 0; -+ print "\n\n======= FINAL RESULTS ==========" -+ print "\n--- FAILURE RESULTS:" -+ for test in self.tests: -+ if test.executed == 0: -+ continue -+ -+ if test.get_exitcode() != 0: -+ failures = failures + 1 -+ test.print_result(" ") -+ else: -+ success = success + 1 -+ -+ if failures == 0: -+ print " None" -+ -+ print "\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures) -+ def build_api_sanity_tests(self): -+ verbose_arg = "" -+ if self.verbose: -+ verbose_arg = "-V" -+ -+ test = self.new_test("standalone_low_level_api_test", "Sanity test client api in standalone mode.") -+ 
test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-t %s" % (verbose_arg)) -+ -+ test = self.new_test("cpg_low_level_api_test", "Sanity test client api using mainloop and cpg.", 1) -+ test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-m %s" % (verbose_arg)) -+ -+ def build_custom_timeout_tests(self): -+ # custom timeout without topology -+ test = self.new_test("cpg_custom_timeout_1", -+ "Verify per device timeouts work as expected without using topology.", 1) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4\"") -+ test.add_cmd("stonith_admin", "-F node3 -t 2") -+ # timeout is 2+1+4 = 7 -+ test.add_stonith_log_pattern("remote op timeout set to 7") -+ -+ # custom timeout _WITH_ topology -+ test = self.new_test("cpg_custom_timeout_2", -+ "Verify per device timeouts work as expected _WITH_ topology.", 1) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=1\"") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\" -o \"pcmk_off_timeout=4000\"") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") -+ test.add_cmd("stonith_admin", "-r node3 -i 3 -v false2") -+ test.add_cmd("stonith_admin", "-F node3 -t 2") -+ # timeout is 2+1+4000 = 4003 -+ test.add_stonith_log_pattern("remote op timeout set to 4003") -+ -+ def build_fence_merge_tests(self): -+ -+ ### Simple test that overlapping fencing operations get merged -+ test = self.new_test("cpg_custom_merge_single", -+ "Verify overlapping identical fencing operations are merged, no fencing levels used.", 1) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd("stonith_admin", "-F node3 -t 10") -+ ### one merger will happen -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ ### the pattern below signifies that both the original and duplicate operation completed -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ -+ ### Test that multiple mergers occur -+ test = self.new_test("cpg_custom_merge_multiple", -+ "Verify multiple overlapping identical fencing operations are merged", 1) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"delay=2\" -o \"pcmk_host_list=node3\" ") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") 
-+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd("stonith_admin", "-F node3 -t 10") -+ ### 4 mergers should occur -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ ### the pattern below signifies that both the original and duplicate operation completed -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ -+ ### Test that multiple mergers occur with topologies used -+ test = self.new_test("cpg_custom_merge_with_topology", -+ "Verify multiple overlapping identical fencing operations are merged with fencing levels.", 1) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\" ") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd_no_wait("stonith_admin", "-F node3 -t 10") -+ test.add_cmd("stonith_admin", "-F node3 -t 10") -+ ### 4 mergers should occur -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ test.add_stonith_log_pattern("Merging stonith action off for node node3 originating from client") -+ ### the pattern below signifies that both the original and duplicate operation completed -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ test.add_stonith_log_pattern("Operation off of node3 by") -+ -+ -+ test = self.new_test("cpg_custom_no_merge", -+ "Verify differing fencing operations are not merged", 1) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3 node2\" ") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3 node2\"") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false2") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v true1") -+ test.add_cmd_no_wait("stonith_admin", "-F node2 -t 10") -+ test.add_cmd("stonith_admin", "-F node3 -t 10") -+ 
test.add_stonith_negative_log_pattern("Merging stonith action off for node node3 originating from client") -+ -+ def build_standalone_tests(self): -+ test_types = [ -+ { -+ "prefix" : "standalone" , -+ "use_cpg" : 0, -+ }, -+ { -+ "prefix" : "cpg" , -+ "use_cpg" : 1, -+ }, -+ ] -+ -+ # test what happens when all devices timeout -+ for test_type in test_types: -+ test = self.new_test("%s_fence_multi_device_failure" % test_type["prefix"], -+ "Verify that all devices timeout, a fencing failure is returned.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ if test_type["use_cpg"] == 1: -+ test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", 194) -+ test.add_stonith_log_pattern("remote op timeout set to 6") -+ else: -+ test.add_expected_fail_cmd("stonith_admin", "-F node3 -t 2", 55) -+ -+ test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: ") -+ test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: ") -+ test.add_stonith_log_pattern("for host 'node3' with device 'false3' returned: ") -+ -+ # test what happens when multiple devices can fence a node, but the first device fails. -+ for test_type in test_types: -+ test = self.new_test("%s_fence_device_failure_rollover" % test_type["prefix"], -+ "Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-F node3 -t 2") -+ -+ if test_type["use_cpg"] == 1: -+ test.add_stonith_log_pattern("remote op timeout set to 6") -+ -+ # simple topology test for one device -+ for test_type in test_types: -+ if test_type["use_cpg"] == 0: -+ continue -+ -+ test = self.new_test("%s_topology_simple" % test_type["prefix"], -+ "Verify all fencing devices at a level are used.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") -+ test.add_cmd("stonith_admin", "-F node3 -t 2") -+ -+ test.add_stonith_log_pattern("remote op timeout set to 2") -+ test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") -+ -+ -+ # add topology, delete topology, verify fencing still works -+ for test_type in test_types: -+ if test_type["use_cpg"] == 0: -+ continue -+ -+ test = self.new_test("%s_topology_add_remove" % test_type["prefix"], -+ "Verify fencing occurrs after all topology levels are removed", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") -+ test.add_cmd("stonith_admin", "-d node3 -i 1") -+ test.add_cmd("stonith_admin", "-F node3 -t 2") -+ -+ test.add_stonith_log_pattern("remote op timeout set to 2") -+ test.add_stonith_log_pattern("for host 'node3' with device 
'true' returned: 0") -+ -+ # test what happens when the first fencing level has multiple devices. -+ for test_type in test_types: -+ if test_type["use_cpg"] == 0: -+ continue -+ -+ test = self.new_test("%s_topology_device_fails" % test_type["prefix"], -+ "Verify if one device in a level fails, the other is tried.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R false -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v true") -+ test.add_cmd("stonith_admin", "-F node3 -t 20") -+ -+ test.add_stonith_log_pattern("remote op timeout set to 40") -+ test.add_stonith_log_pattern("for host 'node3' with device 'false' returned: -201") -+ test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") -+ -+ # test what happens when the first fencing level fails. -+ for test_type in test_types: -+ if test_type["use_cpg"] == 0: -+ continue -+ -+ test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"], -+ "Verify if one level fails, the next leve is tried.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") -+ test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") -+ test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") -+ -+ test.add_cmd("stonith_admin", "-F node3 -t 3") -+ -+ test.add_stonith_log_pattern("remote op timeout set to 18") -+ test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -201") -+ test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -201") -+ test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") -+ test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") -+ -+ -+ # test what happens when the first fencing level had devices that no one has registered -+ for test_type in test_types: -+ if test_type["use_cpg"] == 0: -+ continue -+ -+ test = self.new_test("%s_topology_missing_devices" % test_type["prefix"], -+ "Verify topology can continue with missing devices.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o 
\"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") -+ test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") -+ test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") -+ -+ test.add_cmd("stonith_admin", "-F node3 -t 2") -+ -+ # Test what happens if multiple fencing levels are defined, and then the first one is removed. -+ for test_type in test_types: -+ if test_type["use_cpg"] == 0: -+ continue -+ -+ test = self.new_test("%s_topology_level_removal" % test_type["prefix"], -+ "Verify level removal works.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true4 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node1 node2 node3\"") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") -+ test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") -+ -+ test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") -+ test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") -+ -+ # Now remove level 2, verify none of the devices in level two are hit. -+ test.add_cmd("stonith_admin", "-d node3 -i 2") -+ -+ test.add_cmd("stonith_admin", "-F node3 -t 20") -+ -+ test.add_stonith_log_pattern("remote op timeout set to 8") -+ test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -201") -+ test.add_stonith_negative_log_pattern("for host 'node3' with device 'false2' returned: ") -+ test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") -+ test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") -+ -+ # test the stonith builds the correct list of devices that can fence a node. 
-+ for test_type in test_types: -+ test = self.new_test("%s_list_devices" % test_type["prefix"], -+ "Verify list of devices that can fence a node is correct", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ -+ test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true2", "true1") -+ test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true3", "true1") -+ -+ # simple test of device monitor -+ for test_type in test_types: -+ test = self.new_test("%s_monitor" % test_type["prefix"], -+ "Verify device is reachable", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=node3\"") -+ -+ test.add_cmd("stonith_admin", "-Q true1") -+ test.add_cmd("stonith_admin", "-Q false1") -+ test.add_expected_fail_cmd("stonith_admin", "-Q true2", 237) -+ -+ # Verify monitor occurs for duration of timeout period on failure -+ for test_type in test_types: -+ test = self.new_test("%s_monitor_timeout" % test_type["prefix"], -+ "Verify monitor uses duration of timeout period given.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_monitor_fail -o \"pcmk_host_list=node3\"") -+ test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 5", 195) -+ test.add_stonith_log_pattern("Attempt 2 to execute") -+ -+ # Verify monitor occurs for duration of timeout period on failure, but stops at max retries -+ for test_type in test_types: -+ test = self.new_test("%s_monitor_timeout_max_retries" % test_type["prefix"], -+ "Verify monitor retries until max retry value or timeout is hit.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_monitor_fail -o \"pcmk_host_list=node3\"") -+ test.add_expected_fail_cmd("stonith_admin", "-Q true1 -t 15",195) -+ test.add_stonith_log_pattern("Attempted to execute agent fence_dummy_monitor_fail (list) the maximum number of times") -+ -+ # simple register test -+ for test_type in test_types: -+ test = self.new_test("%s_register" % test_type["prefix"], -+ "Verify devices can be registered and un-registered", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -+ -+ test.add_cmd("stonith_admin", "-Q true1") -+ -+ test.add_cmd("stonith_admin", "-D true1") -+ -+ test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237) -+ -+ -+ # simple reboot test -+ for test_type in test_types: -+ test = self.new_test("%s_reboot" % test_type["prefix"], -+ "Verify devices can be rebooted", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -+ -+ test.add_cmd("stonith_admin", "-B node3 -t 2") -+ -+ test.add_cmd("stonith_admin", "-D true1") -+ -+ test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237) -+ -+ # test fencing history. 
-+ for test_type in test_types: -+ if test_type["use_cpg"] == 0: -+ continue -+ test = self.new_test("%s_fence_history" % test_type["prefix"], -+ "Verify last fencing operation is returned.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node3\"") -+ -+ test.add_cmd("stonith_admin", "-F node3 -t 2 -V") -+ -+ test.add_cmd_check_stdout("stonith_admin", "-H node3", "was able to turn off node node3", "") -+ -+ # simple test of dynamic list query -+ for test_type in test_types: -+ test = self.new_test("%s_dynamic_list_query" % test_type["prefix"], -+ "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_list") -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_list") -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy_list") -+ -+ test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found") -+ -+ -+ # fence using dynamic list query -+ for test_type in test_types: -+ test = self.new_test("%s_fence_dynamic_list_query" % test_type["prefix"], -+ "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_list") -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_list") -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy_list") -+ -+ test.add_cmd("stonith_admin", "-F fake_port_1 -t 5 -V"); -+ -+ # simple test of query using status action -+ for test_type in test_types: -+ test = self.new_test("%s_status_query" % test_type["prefix"], -+ "Verify dynamic list of fencing devices can be retrieved.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_check=status\"") -+ -+ test.add_cmd_check_stdout("stonith_admin", "-l fake_port_1", "3 devices found") -+ -+ # test what happens when no reboot action is advertised -+ for test_type in test_types: -+ test = self.new_test("%s_no_reboot_support" % test_type["prefix"], -+ "Verify reboot action defaults to off when no reboot action is advertised by agent.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_no_reboot -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-B node1 -t 5 -V"); -+ test.add_stonith_log_pattern("does not advertise support for 'reboot', performing 'off'") -+ test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -+ -+ # make sure reboot is used when reboot action is advertised -+ for test_type in test_types: -+ test = self.new_test("%s_with_reboot_support" % test_type["prefix"], -+ "Verify reboot action can be used when metadata advertises it.", test_type["use_cpg"]) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=node1 node2 node3\"") -+ test.add_cmd("stonith_admin", "-B node1 -t 5 -V"); -+ test.add_stonith_negative_log_pattern("does not advertise support for 'reboot', performing 'off'") -+ test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -+ -+ def build_nodeid_tests(self): -+ our_uname = output_from_command("uname -n") -+ if our_uname: -+ our_uname = our_uname[0] -+ -+ ### verify nodeid is supplied when nodeid is in the metadata parameters -+ 
test = self.new_test("cpg_supply_nodeid", -+ "Verify nodeid is given when fence agent has nodeid as parameter", 1) -+ -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) -+ test.add_stonith_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) -+ -+ ### verify nodeid is _NOT_ supplied when nodeid is not in the metadata parameters -+ test = self.new_test("cpg_do_not_supply_nodeid", -+ "Verify nodeid is _NOT_ given when fence agent does not have nodeid as parameter", 1) -+ -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) -+ test.add_stonith_negative_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) -+ -+ ### verify nodeid use doesn't explode standalone mode -+ test = self.new_test("standalone_do_not_supply_nodeid", -+ "Verify nodeid in metadata parameter list doesn't kill standalone mode", 0) -+ -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-F %s -t 3" % (our_uname)) -+ test.add_stonith_negative_log_pattern("For stonith action (off) for victim %s, adding nodeid" % (our_uname)) -+ -+ -+ def build_unfence_tests(self): -+ our_uname = output_from_command("uname -n") -+ if our_uname: -+ our_uname = our_uname[0] -+ -+ ### verify unfencing using automatic unfencing -+ test = self.new_test("cpg_unfence_required_1", -+ "Verify require unfencing on all devices when automatic=true in agent's metadata", 1) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -+ # both devices should be executed -+ test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -+ test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)"); -+ -+ -+ ### verify unfencing using automatic unfencing fails if any of the required agents fail -+ test = self.new_test("cpg_unfence_required_2", -+ "Verify require unfencing on all devices when automatic=true in agent's metadata", 1) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=fail\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_expected_fail_cmd("stonith_admin", "-U %s -t 6" % (our_uname), 143) -+ -+ ### verify unfencing using automatic devices with topology -+ test = self.new_test("cpg_unfence_required_3", -+ "Verify require unfencing on all devices even when required devices are at different topology levels", 1) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) -+ test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -+ 
test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -+ test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)"); -+ -+ -+ ### verify unfencing using automatic devices with topology -+ test = self.new_test("cpg_unfence_required_4", -+ "Verify all required devices are executed even with topology levels fail.", 1) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true3 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true4 -a fence_dummy_automatic_unfence -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R false1 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R false2 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R false3 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R false4 -a fence_dummy -o \"mode=fail\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 1 -v false1" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 2 -v false2" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 2 -v false3" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 2 -v true3" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 3 -v false4" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 4 -v true4" % (our_uname)) -+ test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -+ test.add_stonith_log_pattern("with device 'true1' returned: 0 (OK)"); -+ test.add_stonith_log_pattern("with device 'true2' returned: 0 (OK)"); -+ test.add_stonith_log_pattern("with device 'true3' returned: 0 (OK)"); -+ test.add_stonith_log_pattern("with device 'true4' returned: 0 (OK)"); -+ -+ ### verify unfencing using on_target device -+ test = self.new_test("cpg_unfence_on_target_1", -+ "Verify unfencing with on_target = true", 1) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -+ test.add_stonith_log_pattern("(on) to be executed on the target node") -+ -+ -+ ### verify failure of unfencing using on_target device -+ test = self.new_test("cpg_unfence_on_target_2", -+ "Verify failure unfencing with on_target = true", 1) -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake_1234\"" % (our_uname)) -+ test.add_expected_fail_cmd("stonith_admin", "-U node_fake_1234 -t 3", 237) -+ test.add_stonith_log_pattern("(on) to be executed on the target node") -+ -+ -+ ### verify unfencing using on_target device with topology -+ test = self.new_test("cpg_unfence_on_target_3", -+ "Verify unfencing with on_target = true using topology", 1) -+ -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node3\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o 
\"pcmk_host_list=%s node3\"" % (our_uname)) -+ -+ test.add_cmd("stonith_admin", "-r %s -i 1 -v true1" % (our_uname)) -+ test.add_cmd("stonith_admin", "-r %s -i 2 -v true2" % (our_uname)) -+ -+ test.add_cmd("stonith_admin", "-U %s -t 3" % (our_uname)) -+ test.add_stonith_log_pattern("(on) to be executed on the target node") -+ -+ ### verify unfencing using on_target device with topology fails when victim node doesn't exist -+ test = self.new_test("cpg_unfence_on_target_4", -+ "Verify unfencing failure with on_target = true using topology", 1) -+ -+ test.add_cmd("stonith_admin", "-R true1 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) -+ test.add_cmd("stonith_admin", "-R true2 -a fence_dummy -o \"mode=pass\" -o \"pcmk_host_list=%s node_fake\"" % (our_uname)) -+ -+ test.add_cmd("stonith_admin", "-r node_fake -i 1 -v true1") -+ test.add_cmd("stonith_admin", "-r node_fake -i 2 -v true2") -+ -+ test.add_expected_fail_cmd("stonith_admin", "-U node_fake -t 3", 237) -+ test.add_stonith_log_pattern("(on) to be executed on the target node") -+ -+ def build_remap_tests(self): -+ test = self.new_test("cpg_remap_simple", -+ "Verify sequential topology reboot is remapped to all-off-then-all-on", 1) -+ test.add_cmd("stonith_admin", -+ """-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ -+ """-o "pcmk_off_timeout=1" -o "pcmk_reboot_timeout=10" """) -+ test.add_cmd("stonith_admin", -+ """-R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """ -+ """-o "pcmk_off_timeout=2" -o "pcmk_reboot_timeout=20" """) -+ test.add_cmd("stonith_admin", "-r node_fake -i 1 -v true1 -v true2") -+ test.add_cmd("stonith_admin", "-B node_fake -t 5") -+ test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") -+ # timeout should be sum of off timeouts (1+2=3), not reboot timeouts (10+20=30) -+ test.add_stonith_log_pattern("remote op timeout set to 3 for fencing of node node_fake") -+ test.add_stonith_log_pattern("perform op off node_fake with true1") -+ test.add_stonith_log_pattern("perform op off node_fake with true2") -+ test.add_stonith_log_pattern("Remapped off of node_fake complete, remapping to on") -+ # fence_dummy sets "on" as an on_target action -+ test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) for node_fake") -+ test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) for node_fake") -+ test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") -+ -+ test = self.new_test("cpg_remap_automatic", -+ "Verify remapped topology reboot skips automatic 'on'", 1) -+ test.add_cmd("stonith_admin", -+ """-R true1 -a fence_dummy_automatic_unfence """ -+ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", -+ """-R true2 -a fence_dummy_automatic_unfence """ -+ """-o "mode=pass" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", "-r node_fake -i 1 -v true1 -v true2") -+ test.add_cmd("stonith_admin", "-B node_fake -t 5") -+ test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") -+ test.add_stonith_log_pattern("perform op off node_fake with true1") -+ test.add_stonith_log_pattern("perform op off node_fake with true2") -+ test.add_stonith_log_pattern("Remapped off of node_fake complete, remapping to on") -+ test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") -+ test.add_stonith_negative_log_pattern("perform op on node_fake with") -+ test.add_stonith_negative_log_pattern("'on' failure") -+ -+ 
test = self.new_test("cpg_remap_complex_1", -+ "Verify remapped topology reboot in second level works if non-remapped first level fails", 1) -+ test.add_cmd("stonith_admin", """-R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", """-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", """-R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", "-r node_fake -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node_fake -i 2 -v true1 -v true2") -+ test.add_cmd("stonith_admin", "-B node_fake -t 5") -+ test.add_stonith_log_pattern("perform op reboot node_fake with false1") -+ test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") -+ test.add_stonith_log_pattern("perform op off node_fake with true1") -+ test.add_stonith_log_pattern("perform op off node_fake with true2") -+ test.add_stonith_log_pattern("Remapped off of node_fake complete, remapping to on") -+ test.add_stonith_log_pattern("Ignoring true1 'on' failure (no capable peers) for node_fake") -+ test.add_stonith_log_pattern("Ignoring true2 'on' failure (no capable peers) for node_fake") -+ test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") -+ -+ test = self.new_test("cpg_remap_complex_2", -+ "Verify remapped topology reboot failure in second level proceeds to third level", 1) -+ test.add_cmd("stonith_admin", """-R false1 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", """-R false2 -a fence_dummy -o "mode=fail" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", """-R true1 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", """-R true2 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", """-R true3 -a fence_dummy -o "mode=pass" -o "pcmk_host_list=node_fake" """) -+ test.add_cmd("stonith_admin", "-r node_fake -i 1 -v false1") -+ test.add_cmd("stonith_admin", "-r node_fake -i 2 -v true1 -v false2 -v true3") -+ test.add_cmd("stonith_admin", "-r node_fake -i 3 -v true2") -+ test.add_cmd("stonith_admin", "-B node_fake -t 5") -+ test.add_stonith_log_pattern("perform op reboot node_fake with false1") -+ test.add_stonith_log_pattern("Remapping multiple-device reboot of node_fake") -+ test.add_stonith_log_pattern("perform op off node_fake with true1") -+ test.add_stonith_log_pattern("perform op off node_fake with false2") -+ test.add_stonith_log_pattern("Attempted to execute agent fence_dummy (off) the maximum number of times") -+ test.add_stonith_log_pattern("Undoing remap of reboot of node_fake") -+ test.add_stonith_log_pattern("perform op reboot node_fake with true2") -+ test.add_stonith_negative_log_pattern("node_fake with true3") -+ -+ def setup_environment(self, use_corosync): -+ if self.autogen_corosync_cfg and use_corosync: -+ corosync_conf = (""" - totem { - version: 2 - crypto_cipher: none -@@ -908,15 +984,15 @@ logging { - } - """) - -- os.system("cat <<-END >>/etc/corosync/corosync.conf\n%s\nEND" % (corosync_conf)) -+ os.system("cat <<-END >>/etc/corosync/corosync.conf\n%s\nEND" % (corosync_conf)) - - -- if use_corosync: -- ### make sure we are in control ### -- self.stop_corosync() -- self.start_corosync() -+ if use_corosync: -+ ### make sure we are in control ### -+ self.stop_corosync() -+ self.start_corosync() - -- monitor_fail_agent = ("""#!/usr/bin/python -+ monitor_fail_agent 
= ("""#!/usr/bin/python - import sys - def main(): - for line in sys.stdin.readlines(): -@@ -927,7 +1003,7 @@ if __name__ == "__main__": - main() - """) - -- dynamic_list_agent = ("""#!/usr/bin/python -+ dynamic_list_agent = ("""#!/usr/bin/python - import sys - def main(): - for line in sys.stdin.readlines(): -@@ -942,140 +1018,141 @@ if __name__ == "__main__": - """) - - -- os.system("cat <<-END >>/usr/sbin/fence_dummy_list\n%s\nEND" % (dynamic_list_agent)) -- os.system("chmod 711 /usr/sbin/fence_dummy_list") -+ os.system("cat <<-END >>/usr/sbin/fence_dummy_list\n%s\nEND" % (dynamic_list_agent)) -+ os.system("chmod 711 /usr/sbin/fence_dummy_list") - -- os.system("cat <<-END >>/usr/sbin/fence_dummy_monitor_fail\n%s\nEND" % (monitor_fail_agent)) -- os.system("chmod 711 /usr/sbin/fence_dummy_monitor_fail") -+ os.system("cat <<-END >>/usr/sbin/fence_dummy_monitor_fail\n%s\nEND" % (monitor_fail_agent)) -+ os.system("chmod 711 /usr/sbin/fence_dummy_monitor_fail") - -- os.system("cp /usr/share/pacemaker/tests/cts/fence_dummy /usr/sbin/fence_dummy") -+ os.system("cp /usr/share/pacemaker/tests/cts/fence_dummy /usr/sbin/fence_dummy") - -- # modifies dummy agent to do require unfencing -- os.system("cat /usr/share/pacemaker/tests/cts/fence_dummy | sed 's/on_target=/automatic=/g' > /usr/sbin/fence_dummy_automatic_unfence"); -- os.system("chmod 711 /usr/sbin/fence_dummy_automatic_unfence") -+ # modifies dummy agent to do require unfencing -+ os.system("cat /usr/share/pacemaker/tests/cts/fence_dummy | sed 's/on_target=/automatic=/g' > /usr/sbin/fence_dummy_automatic_unfence"); -+ os.system("chmod 711 /usr/sbin/fence_dummy_automatic_unfence") - -- # modifies dummy agent to not advertise reboot -- os.system("cat /usr/share/pacemaker/tests/cts/fence_dummy | sed 's/^.*.*//g' > /usr/sbin/fence_dummy_no_reboot"); -- os.system("chmod 711 /usr/sbin/fence_dummy_no_reboot") -+ # modifies dummy agent to not advertise reboot -+ os.system("cat /usr/share/pacemaker/tests/cts/fence_dummy | sed 's/^.*.*//g' > /usr/sbin/fence_dummy_no_reboot"); -+ os.system("chmod 711 /usr/sbin/fence_dummy_no_reboot") - -- def cleanup_environment(self, use_corosync): -- if use_corosync: -- self.stop_corosync() -+ def cleanup_environment(self, use_corosync): -+ if use_corosync: -+ self.stop_corosync() - -- if self.verbose and os.path.exists('/var/log/corosync.log'): -- print "Corosync output" -- f = open('/var/log/corosync.log', 'r') -- for line in f.readlines(): -- print line.strip() -- os.remove('/var/log/corosync.log') -+ if self.verbose and os.path.exists('/var/log/corosync.log'): -+ print "Corosync output" -+ f = open('/var/log/corosync.log', 'r') -+ for line in f.readlines(): -+ print line.strip() -+ os.remove('/var/log/corosync.log') - -- if self.autogen_corosync_cfg: -- os.system("rm -f /etc/corosync/corosync.conf") -+ if self.autogen_corosync_cfg: -+ os.system("rm -f /etc/corosync/corosync.conf") - -- os.system("rm -f /usr/sbin/fence_dummy_monitor_fail") -- os.system("rm -f /usr/sbin/fence_dummy_list") -- os.system("rm -f /usr/sbin/fence_dummy") -- os.system("rm -f /usr/sbin/fence_dummy_automatic_unfence") -- os.system("rm -f /usr/sbin/fence_dummy_no_reboot") -+ os.system("rm -f /usr/sbin/fence_dummy_monitor_fail") -+ os.system("rm -f /usr/sbin/fence_dummy_list") -+ os.system("rm -f /usr/sbin/fence_dummy") -+ os.system("rm -f /usr/sbin/fence_dummy_automatic_unfence") -+ os.system("rm -f /usr/sbin/fence_dummy_no_reboot") - - class TestOptions: -- def __init__(self): -- self.options = {} -- self.options['list-tests'] = 0 -- 
self.options['run-all'] = 1 -- self.options['run-only'] = "" -- self.options['run-only-pattern'] = "" -- self.options['verbose'] = 0 -- self.options['invalid-arg'] = "" -- self.options['cpg-only'] = 0 -- self.options['no-cpg'] = 0 -- self.options['show-usage'] = 0 -- -- def build_options(self, argv): -- args = argv[1:] -- skip = 0 -- for i in range(0, len(args)): -- if skip: -- skip = 0 -- continue -- elif args[i] == "-h" or args[i] == "--help": -- self.options['show-usage'] = 1 -- elif args[i] == "-l" or args[i] == "--list-tests": -- self.options['list-tests'] = 1 -- elif args[i] == "-V" or args[i] == "--verbose": -- self.options['verbose'] = 1 -- elif args[i] == "-n" or args[i] == "--no-cpg": -- self.options['no-cpg'] = 1 -- elif args[i] == "-c" or args[i] == "--cpg-only": -- self.options['cpg-only'] = 1 -- elif args[i] == "-r" or args[i] == "--run-only": -- self.options['run-only'] = args[i+1] -- skip = 1 -- elif args[i] == "-p" or args[i] == "--run-only-pattern": -- self.options['run-only-pattern'] = args[i+1] -- skip = 1 -- -- def show_usage(self): -- print "usage: " + sys.argv[0] + " [options]" -- print "If no options are provided, all tests will run" -- print "Options:" -- print "\t [--help | -h] Show usage" -- print "\t [--list-tests | -l] Print out all registered tests." -- print "\t [--cpg-only | -c] Only run tests that require corosync." -- print "\t [--no-cpg | -n] Only run tests that do not require corosync" -- print "\t [--run-only | -r 'testname'] Run a specific test" -- print "\t [--verbose | -V] Verbose output" -- print "\t [--run-only-pattern | -p 'string'] Run only tests containing the string value" -- print "\n\tExample: Run only the test 'start_top'" -- print "\t\t python ./regression.py --run-only start_stop" -- print "\n\tExample: Run only the tests with the string 'systemd' present in them" -- print "\t\t python ./regression.py --run-only-pattern systemd" -+ def __init__(self): -+ self.options = {} -+ self.options['list-tests'] = 0 -+ self.options['run-all'] = 1 -+ self.options['run-only'] = "" -+ self.options['run-only-pattern'] = "" -+ self.options['verbose'] = 0 -+ self.options['invalid-arg'] = "" -+ self.options['cpg-only'] = 0 -+ self.options['no-cpg'] = 0 -+ self.options['show-usage'] = 0 -+ -+ def build_options(self, argv): -+ args = argv[1:] -+ skip = 0 -+ for i in range(0, len(args)): -+ if skip: -+ skip = 0 -+ continue -+ elif args[i] == "-h" or args[i] == "--help": -+ self.options['show-usage'] = 1 -+ elif args[i] == "-l" or args[i] == "--list-tests": -+ self.options['list-tests'] = 1 -+ elif args[i] == "-V" or args[i] == "--verbose": -+ self.options['verbose'] = 1 -+ elif args[i] == "-n" or args[i] == "--no-cpg": -+ self.options['no-cpg'] = 1 -+ elif args[i] == "-c" or args[i] == "--cpg-only": -+ self.options['cpg-only'] = 1 -+ elif args[i] == "-r" or args[i] == "--run-only": -+ self.options['run-only'] = args[i+1] -+ skip = 1 -+ elif args[i] == "-p" or args[i] == "--run-only-pattern": -+ self.options['run-only-pattern'] = args[i+1] -+ skip = 1 -+ -+ def show_usage(self): -+ print "usage: " + sys.argv[0] + " [options]" -+ print "If no options are provided, all tests will run" -+ print "Options:" -+ print "\t [--help | -h] Show usage" -+ print "\t [--list-tests | -l] Print out all registered tests." -+ print "\t [--cpg-only | -c] Only run tests that require corosync." 
-+ print "\t [--no-cpg | -n] Only run tests that do not require corosync" -+ print "\t [--run-only | -r 'testname'] Run a specific test" -+ print "\t [--verbose | -V] Verbose output" -+ print "\t [--run-only-pattern | -p 'string'] Run only tests containing the string value" -+ print "\n\tExample: Run only the test 'start_top'" -+ print "\t\t python ./regression.py --run-only start_stop" -+ print "\n\tExample: Run only the tests with the string 'systemd' present in them" -+ print "\t\t python ./regression.py --run-only-pattern systemd" - - def main(argv): -- o = TestOptions() -- o.build_options(argv) -- -- use_corosync = 1 -- -- tests = Tests(o.options['verbose']) -- tests.build_standalone_tests() -- tests.build_custom_timeout_tests() -- tests.build_api_sanity_tests() -- tests.build_fence_merge_tests() -- tests.build_unfence_tests() -- tests.build_nodeid_tests() -- -- if o.options['list-tests']: -- tests.print_list() -- sys.exit(0) -- elif o.options['show-usage']: -- o.show_usage() -- sys.exit(0) -- -- print "Starting ..." -- -- if o.options['no-cpg']: -- use_corosync = 0 -- -- tests.setup_environment(use_corosync) -- -- if o.options['run-only-pattern'] != "": -- tests.run_tests_matching(o.options['run-only-pattern']) -- tests.print_results() -- elif o.options['run-only'] != "": -- tests.run_single(o.options['run-only']) -- tests.print_results() -- elif o.options['no-cpg']: -- tests.run_no_cpg() -- tests.print_results() -- elif o.options['cpg-only']: -- tests.run_cpg_only() -- tests.print_results() -- else: -- tests.run_tests() -- tests.print_results() -- -- tests.cleanup_environment(use_corosync) -- tests.exit() -+ o = TestOptions() -+ o.build_options(argv) -+ -+ use_corosync = 1 -+ -+ tests = Tests(o.options['verbose']) -+ tests.build_standalone_tests() -+ tests.build_custom_timeout_tests() -+ tests.build_api_sanity_tests() -+ tests.build_fence_merge_tests() -+ tests.build_unfence_tests() -+ tests.build_nodeid_tests() -+ tests.build_remap_tests() -+ -+ if o.options['list-tests']: -+ tests.print_list() -+ sys.exit(0) -+ elif o.options['show-usage']: -+ o.show_usage() -+ sys.exit(0) -+ -+ print "Starting ..." -+ -+ if o.options['no-cpg']: -+ use_corosync = 0 -+ -+ tests.setup_environment(use_corosync) -+ -+ if o.options['run-only-pattern'] != "": -+ tests.run_tests_matching(o.options['run-only-pattern']) -+ tests.print_results() -+ elif o.options['run-only'] != "": -+ tests.run_single(o.options['run-only']) -+ tests.print_results() -+ elif o.options['no-cpg']: -+ tests.run_no_cpg() -+ tests.print_results() -+ elif o.options['cpg-only']: -+ tests.run_cpg_only() -+ tests.print_results() -+ else: -+ tests.run_tests() -+ tests.print_results() -+ -+ tests.cleanup_environment(use_corosync) -+ tests.exit() - if __name__=="__main__": -- main(sys.argv) -+ main(sys.argv) -diff --git a/fencing/remote.c b/fencing/remote.c -index a568035..2c00b5f 100644 ---- a/fencing/remote.c -+++ b/fencing/remote.c -@@ -47,17 +47,37 @@ - - #define TIMEOUT_MULTIPLY_FACTOR 1.2 - -+/* When one stonithd queries its peers for devices able to handle a fencing -+ * request, each peer will reply with a list of such devices available to it. -+ * Each reply will be parsed into a st_query_result_t, with each device's -+ * information kept in a device_properties_t. 
-+ */ -+ -+typedef struct device_properties_s { -+ /* Whether access to this device has been verified */ -+ gboolean verified; -+ -+ /* The remaining members are indexed by the operation's "phase" */ -+ -+ /* Whether this device has been executed in each phase */ -+ gboolean executed[3]; -+ /* Whether this device is disallowed from executing in each phase */ -+ gboolean disallowed[3]; -+ /* Action-specific timeout for each phase */ -+ int custom_action_timeout[3]; -+ /* Action-specific maximum random delay for each phase */ -+ int delay_max[3]; -+} device_properties_t; -+ - typedef struct st_query_result_s { -+ /* Name of peer that sent this result */ - char *host; -- int devices; -- /* only try peers for non-topology based operations once */ -+ /* Only try peers for non-topology based operations once */ - gboolean tried; -- GListPtr device_list; -- GHashTable *custom_action_timeouts; -- GHashTable *delay_maxes; -- /* Subset of devices that peer has verified connectivity on */ -- GHashTable *verified_devices; -- -+ /* Number of entries in the devices table */ -+ int ndevices; -+ /* Devices available to this host that are capable of fencing the target */ -+ GHashTable *devices; - } st_query_result_t; - - GHashTable *remote_op_list = NULL; -@@ -67,8 +87,8 @@ extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op - int call_options); - - static void report_timeout_period(remote_fencing_op_t * op, int op_timeout); --static int get_op_total_timeout(remote_fencing_op_t * op, st_query_result_t * chosen_peer, -- int default_timeout); -+static int get_op_total_timeout(const remote_fencing_op_t *op, -+ const st_query_result_t *chosen_peer); - - static gint - sort_strings(gconstpointer a, gconstpointer b) -@@ -83,15 +103,126 @@ free_remote_query(gpointer data) - st_query_result_t *query = data; - - crm_trace("Free'ing query result from %s", query->host); -+ g_hash_table_destroy(query->devices); - free(query->host); -- g_list_free_full(query->device_list, free); -- g_hash_table_destroy(query->custom_action_timeouts); -- g_hash_table_destroy(query->delay_maxes); -- g_hash_table_destroy(query->verified_devices); - free(query); - } - } - -+struct peer_count_data { -+ const remote_fencing_op_t *op; -+ gboolean verified_only; -+ int count; -+}; -+ -+/*! -+ * \internal -+ * \brief Increment a counter if a device has not been executed yet -+ * -+ * \param[in] key Device ID (ignored) -+ * \param[in] value Device properties -+ * \param[in] user_data Peer count data -+ */ -+static void -+count_peer_device(gpointer key, gpointer value, gpointer user_data) -+{ -+ device_properties_t *props = (device_properties_t*)value; -+ struct peer_count_data *data = user_data; -+ -+ if (!props->executed[data->op->phase] -+ && (!data->verified_only || props->verified)) { -+ ++(data->count); -+ } -+} -+ -+/*! -+ * \internal -+ * \brief Check the number of available devices in a peer's query results -+ * -+ * \param[in] op Operation that results are for -+ * \param[in] peer Peer to count -+ * \param[in] verified_only Whether to count only verified devices -+ * -+ * \return Number of devices available to peer that were not already executed -+ */ -+static int -+count_peer_devices(const remote_fencing_op_t *op, const st_query_result_t *peer, -+ gboolean verified_only) -+{ -+ struct peer_count_data data; -+ -+ data.op = op; -+ data.verified_only = verified_only; -+ data.count = 0; -+ if (peer) { -+ g_hash_table_foreach(peer->devices, count_peer_device, &data); -+ } -+ return data.count; -+} -+ -+/*! 
-+ * \internal -+ * \brief Search for a device in a query result -+ * -+ * \param[in] op Operation that result is for -+ * \param[in] peer Query result for a peer -+ * \param[in] device Device ID to search for -+ * -+ * \return Device properties if found, NULL otherwise -+ */ -+static device_properties_t * -+find_peer_device(const remote_fencing_op_t *op, const st_query_result_t *peer, -+ const char *device) -+{ -+ device_properties_t *props = g_hash_table_lookup(peer->devices, device); -+ -+ return (props && !props->executed[op->phase] -+ && !props->disallowed[op->phase])? props : NULL; -+} -+ -+/*! -+ * \internal -+ * \brief Find a device in a peer's device list and mark it as executed -+ * -+ * \param[in] op Operation that peer result is for -+ * \param[in,out] peer Peer with results to search -+ * \param[in] device ID of device to mark as done -+ * \param[in] verified_devices_only Only consider verified devices -+ * -+ * \return TRUE if device was found and marked, FALSE otherwise -+ */ -+static gboolean -+grab_peer_device(const remote_fencing_op_t *op, st_query_result_t *peer, -+ const char *device, gboolean verified_devices_only) -+{ -+ device_properties_t *props = find_peer_device(op, peer, device); -+ -+ if ((props == NULL) || (verified_devices_only && !props->verified)) { -+ return FALSE; -+ } -+ -+ crm_trace("Removing %s from %s (%d remaining)", -+ device, peer->host, count_peer_devices(op, peer, FALSE)); -+ props->executed[op->phase] = TRUE; -+ return TRUE; -+} -+ -+/* -+ * \internal -+ * \brief Free the list of required devices for a particular phase -+ * -+ * \param[in,out] op Operation to modify -+ * \param[in] phase Phase to modify -+ */ -+static void -+free_required_list(remote_fencing_op_t *op, enum st_remap_phase phase) -+{ -+ if (op->required_list[phase]) { -+ g_list_free_full(op->required_list[phase], free); -+ op->required_list[phase] = NULL; -+ } -+} -+ - static void - clear_remote_op_timers(remote_fencing_op_t * op) - { -@@ -137,13 +268,100 @@ free_remote_op(gpointer data) - g_list_free_full(op->devices_list, free); - op->devices_list = NULL; - } -- if (op->required_list) { -- g_list_free_full(op->required_list, free); -- op->required_list = NULL; -- } -+ free_required_list(op, st_phase_requested); -+ free_required_list(op, st_phase_off); -+ free_required_list(op, st_phase_on); - free(op); - } - -+/* -+ * \internal -+ * \brief Return an operation's originally requested action (before any remap) -+ * -+ * \param[in] op Operation to check -+ * -+ * \return Operation's original action -+ */ -+static const char * -+op_requested_action(const remote_fencing_op_t *op) -+{ -+ return ((op->phase > st_phase_requested)? "reboot" : op->action); -+} -+ -+/* -+ * \internal -+ * \brief Remap a "reboot" operation to the "off" phase -+ * -+ * \param[in,out] op Operation to remap -+ */ -+static void -+op_phase_off(remote_fencing_op_t *op) -+{ -+ crm_info("Remapping multiple-device reboot of %s (%s) to off", -+ op->target, op->id); -+ op->phase = st_phase_off; -+ -+ /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the -+ * memory allocation at each phase. -+ */ -+ strcpy(op->action, "off"); -+} -+ -+/*! 
-+ * \internal -+ * \brief Advance a remapped reboot operation to the "on" phase -+ * -+ * \param[in,out] op Operation to remap -+ */ -+static void -+op_phase_on(remote_fencing_op_t *op) -+{ -+ GListPtr iter = NULL; -+ -+ crm_info("Remapped off of %s complete, remapping to on for %s.%.8s", -+ op->target, op->client_name, op->id); -+ op->phase = st_phase_on; -+ strcpy(op->action, "on"); -+ -+ /* Any devices that are required for "on" will be automatically executed by -+ * the cluster when the node next joins, so we skip them here. -+ */ -+ for (iter = op->required_list[op->phase]; iter != NULL; iter = iter->next) { -+ GListPtr match = g_list_find_custom(op->devices_list, iter->data, -+ sort_strings); -+ -+ if (match) { -+ op->devices_list = g_list_remove(op->devices_list, match->data); -+ } -+ } -+ -+ /* We know this level will succeed, because phase 1 completed successfully -+ * and we ignore any errors from phase 2. So we can free the required list, -+ * which will keep them from being executed after the device list is done. -+ */ -+ free_required_list(op, op->phase); -+ -+ /* Rewind device list pointer */ -+ op->devices = op->devices_list; -+} -+ -+/*! -+ * \internal -+ * \brief Reset a remapped reboot operation -+ * -+ * \param[in,out] op Operation to reset -+ */ -+static void -+undo_op_remap(remote_fencing_op_t *op) -+{ -+ if (op->phase > 0) { -+ crm_info("Undoing remap of reboot of %s for %s.%.8s", -+ op->target, op->client_name, op->id); -+ op->phase = st_phase_requested; -+ strcpy(op->action, "reboot"); -+ } -+} -+ - static xmlNode * - create_op_done_notify(remote_fencing_op_t * op, int rc) - { -@@ -271,6 +489,7 @@ remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup) - - op->completed = time(NULL); - clear_remote_op_timers(op); -+ undo_op_remap(op); - - if (op->notify_sent == TRUE) { - crm_err("Already sent notifications for '%s of %s by %s' (for=%s@%s.%.8s, state=%d): %s", -@@ -279,10 +498,12 @@ remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup) - goto remote_op_done_cleanup; - } - -- if (!op->delegate && data) { -+ if (!op->delegate && data && rc != -ENODEV && rc != -EHOSTUNREACH) { - xmlNode *ndata = get_xpath_object("//@" F_STONITH_DELEGATE, data, LOG_TRACE); - if(ndata) { - op->delegate = crm_element_value_copy(ndata, F_STONITH_DELEGATE); -+ } else { -+ op->delegate = crm_element_value_copy(data, F_ORIG); - } - } - -@@ -377,6 +598,16 @@ remote_op_timeout(gpointer userdata) - - crm_debug("Action %s (%s) for %s (%s) timed out", - op->action, op->id, op->target, op->client_name); -+ -+ if (op->phase == st_phase_on) { -+ /* A remapped reboot operation timed out in the "on" phase, but the -+ * "off" phase completed successfully, so quit trying any further -+ * devices, and return success. 
-+ */ -+ remote_op_done(op, NULL, pcmk_ok, FALSE); -+ return FALSE; -+ } -+ - op->state = st_failed; - - remote_op_done(op, NULL, -ETIME, FALSE); -@@ -426,22 +657,43 @@ topology_is_empty(stonith_topology_t *tp) - return TRUE; - } - -+/* -+ * \internal -+ * \brief Add a device to the required list for a particular phase -+ * -+ * \param[in,out] op Operation to modify -+ * \param[in] phase Phase to modify -+ * \param[in] device Device ID to add -+ */ - static void --add_required_device(remote_fencing_op_t * op, const char *device) -+add_required_device(remote_fencing_op_t *op, enum st_remap_phase phase, -+ const char *device) - { -- GListPtr match = g_list_find_custom(op->required_list, device, sort_strings); -- if (match) { -- /* device already marked required */ -- return; -+ GListPtr match = g_list_find_custom(op->required_list[phase], device, -+ sort_strings); -+ -+ if (!match) { -+ op->required_list[phase] = g_list_prepend(op->required_list[phase], -+ strdup(device)); - } -- op->required_list = g_list_prepend(op->required_list, strdup(device)); -+} - -- /* make sure the required devices is in the current list of devices to be executed */ -- if (op->devices_list) { -- GListPtr match = g_list_find_custom(op->devices_list, device, sort_strings); -- if (match == NULL) { -- op->devices_list = g_list_append(op->devices_list, strdup(device)); -- } -+/* -+ * \internal -+ * \brief Remove a device from the required list for the current phase -+ * -+ * \param[in,out] op Operation to modify -+ * \param[in] device Device ID to remove -+ */ -+static void -+remove_required_device(remote_fencing_op_t *op, const char *device) -+{ -+ GListPtr match = g_list_find_custom(op->required_list[op->phase], device, -+ sort_strings); -+ -+ if (match) { -+ op->required_list[op->phase] = g_list_remove(op->required_list[op->phase], -+ match->data); - } - } - -@@ -458,18 +710,6 @@ set_op_device_list(remote_fencing_op_t * op, GListPtr devices) - for (lpc = devices; lpc != NULL; lpc = lpc->next) { - op->devices_list = g_list_append(op->devices_list, strdup(lpc->data)); - } -- -- /* tack on whatever required devices have not been executed -- * to the end of the current devices list. This ensures that -- * the required devices will get executed regardless of what topology -- * level they exist at. */ -- for (lpc = op->required_list; lpc != NULL; lpc = lpc->next) { -- GListPtr match = g_list_find_custom(op->devices_list, lpc->data, sort_strings); -- if (match == NULL) { -- op->devices_list = g_list_append(op->devices_list, strdup(lpc->data)); -- } -- } -- - op->devices = op->devices_list; - } - -@@ -491,6 +731,7 @@ find_topology_for_host(const char *host) - crm_info("Bad regex '%s' for fencing level", tp->node); - } else { - status = regexec(&r_patt, host, 0, NULL, 0); -+ regfree(&r_patt); - } - - if (status == 0) { -@@ -529,6 +770,9 @@ stonith_topology_next(remote_fencing_op_t * op) - - set_bit(op->call_options, st_opt_topology); - -+ /* This is a new level, so undo any remapping left over from previous */ -+ undo_op_remap(op); -+ - do { - op->level++; - -@@ -539,6 +783,15 @@ stonith_topology_next(remote_fencing_op_t * op) - op->level, op->target, g_list_length(tp->levels[op->level]), - op->client_name, op->originator, op->id); - set_op_device_list(op, tp->levels[op->level]); -+ -+ if (g_list_next(op->devices_list) && safe_str_eq(op->action, "reboot")) { -+ /* A reboot has been requested for a topology level with multiple -+ * devices. 
Instead of rebooting the devices sequentially, we will -+ * turn them all off, then turn them all on again. (Think about -+ * switched power outlets for redundant power supplies.) -+ */ -+ op_phase_off(op); -+ } - return pcmk_ok; - } - -@@ -563,6 +816,7 @@ merge_duplicates(remote_fencing_op_t * op) - g_hash_table_iter_init(&iter, remote_op_list); - while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) { - crm_node_t *peer = NULL; -+ const char *other_action = op_requested_action(other); - - if (other->state > st_exec) { - /* Must be in-progress */ -@@ -570,8 +824,9 @@ merge_duplicates(remote_fencing_op_t * op) - } else if (safe_str_neq(op->target, other->target)) { - /* Must be for the same node */ - continue; -- } else if (safe_str_neq(op->action, other->action)) { -- crm_trace("Must be for the same action: %s vs. ", op->action, other->action); -+ } else if (safe_str_neq(op->action, other_action)) { -+ crm_trace("Must be for the same action: %s vs. %s", -+ op->action, other_action); - continue; - } else if (safe_str_eq(op->client_name, other->client_name)) { - crm_trace("Must be for different clients: %s", op->client_name); -@@ -602,7 +857,7 @@ merge_duplicates(remote_fencing_op_t * op) - if (other->total_timeout == 0) { - crm_trace("Making a best-guess as to the timeout used"); - other->total_timeout = op->total_timeout = -- TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL, op->base_timeout); -+ TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL); - } - crm_notice - ("Merging stonith action %s for node %s originating from client %s.%.8s with identical request from %s@%s.%.8s (%ds)", -@@ -792,16 +1047,16 @@ initiate_remote_stonith_op(crm_client_t * client, xmlNode * request, gboolean ma - op->id, op->state); - } - -- query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY, NULL, 0); -+ query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY, -+ NULL, op->call_options); - - crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id); - crm_xml_add(query, F_STONITH_TARGET, op->target); -- crm_xml_add(query, F_STONITH_ACTION, op->action); -+ crm_xml_add(query, F_STONITH_ACTION, op_requested_action(op)); - crm_xml_add(query, F_STONITH_ORIGIN, op->originator); - crm_xml_add(query, F_STONITH_CLIENTID, op->client_id); - crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name); - crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout); -- crm_xml_add_int(query, F_STONITH_CALLOPTS, op->call_options); - - send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE); - free_xml(query); -@@ -835,7 +1090,7 @@ find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer - st_query_result_t *peer = iter->data; - - crm_trace("Testing result from %s for %s with %d devices: %d %x", -- peer->host, op->target, peer->devices, peer->tried, options); -+ peer->host, op->target, peer->ndevices, peer->tried, options); - if ((options & FIND_PEER_SKIP_TARGET) && safe_str_eq(peer->host, op->target)) { - continue; - } -@@ -844,25 +1099,13 @@ find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer - } - - if (is_set(op->call_options, st_opt_topology)) { -- /* Do they have the next device of the current fencing level? 
*/ -- GListPtr match = NULL; -- -- if (verified_devices_only && !g_hash_table_lookup(peer->verified_devices, device)) { -- continue; -- } - -- match = g_list_find_custom(peer->device_list, device, sort_strings); -- if (match) { -- crm_trace("Removing %s from %s (%d remaining)", (char *)match->data, peer->host, -- g_list_length(peer->device_list)); -- peer->device_list = g_list_remove(peer->device_list, match->data); -+ if (grab_peer_device(op, peer, device, verified_devices_only)) { - return peer; - } - -- } else if (peer->devices > 0 && peer->tried == FALSE) { -- if (verified_devices_only && !g_hash_table_size(peer->verified_devices)) { -- continue; -- } -+ } else if ((peer->tried == FALSE) -+ && count_peer_devices(op, peer, verified_devices_only)) { - - /* No topology: Use the current best peer */ - crm_trace("Simple fencing"); -@@ -883,11 +1126,14 @@ stonith_choose_peer(remote_fencing_op_t * op) - do { - if (op->devices) { - device = op->devices->data; -- crm_trace("Checking for someone to fence %s with %s", op->target, device); -+ crm_trace("Checking for someone to fence (%s) %s with %s", -+ op->action, op->target, device); - } else { -- crm_trace("Checking for someone to fence %s", op->target); -+ crm_trace("Checking for someone to fence (%s) %s", -+ op->action, op->target); - } - -+ /* Best choice is a peer other than the target with verified access */ - peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY); - if (peer) { - crm_trace("Found verified peer %s for %s", peer->host, device?device:""); -@@ -899,62 +1145,101 @@ stonith_choose_peer(remote_fencing_op_t * op) - return NULL; - } - -+ /* If no other peer has verified access, next best is unverified access */ - peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET); - if (peer) { - crm_trace("Found best unverified peer %s", peer->host); - return peer; - } - -- peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY); -- if(peer) { -- crm_trace("%s will fence itself", peer->host); -- return peer; -+ /* If no other peer can do it, last option is self-fencing -+ * (which is never allowed for the "on" phase of a remapped reboot) -+ */ -+ if (op->phase != st_phase_on) { -+ peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY); -+ if (peer) { -+ crm_trace("%s will fence itself", peer->host); -+ return peer; -+ } - } - -- /* Try the next fencing level if there is one */ -- } while (is_set(op->call_options, st_opt_topology) -+ /* Try the next fencing level if there is one (unless we're in the "on" -+ * phase of a remapped "reboot", because we ignore errors in that case) -+ */ -+ } while ((op->phase != st_phase_on) -+ && is_set(op->call_options, st_opt_topology) - && stonith_topology_next(op) == pcmk_ok); - -- crm_notice("Couldn't find anyone to fence %s with %s", op->target, device?device:""); -+ crm_notice("Couldn't find anyone to fence (%s) %s with %s", -+ op->action, op->target, (device? 
device : "any device")); - return NULL; - } - - static int --get_device_timeout(st_query_result_t * peer, const char *device, int default_timeout) -+get_device_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer, -+ const char *device) - { -- gpointer res; -- int delay_max = 0; -+ device_properties_t *props; - - if (!peer || !device) { -- return default_timeout; -+ return op->base_timeout; - } - -- res = g_hash_table_lookup(peer->delay_maxes, device); -- if (res && GPOINTER_TO_INT(res) > 0) { -- delay_max = GPOINTER_TO_INT(res); -+ props = g_hash_table_lookup(peer->devices, device); -+ if (!props) { -+ return op->base_timeout; - } - -- res = g_hash_table_lookup(peer->custom_action_timeouts, device); -+ return (props->custom_action_timeout[op->phase]? -+ props->custom_action_timeout[op->phase] : op->base_timeout) -+ + props->delay_max[op->phase]; -+} - -- return res ? GPOINTER_TO_INT(res) + delay_max : default_timeout + delay_max; -+struct timeout_data { -+ const remote_fencing_op_t *op; -+ const st_query_result_t *peer; -+ int total_timeout; -+}; -+ -+/*! -+ * \internal -+ * \brief Add timeout to a total if device has not been executed yet -+ * -+ * \param[in] key GHashTable key (device ID) -+ * \param[in] value GHashTable value (device properties) -+ * \param[in] user_data Timeout data -+ */ -+static void -+add_device_timeout(gpointer key, gpointer value, gpointer user_data) -+{ -+ const char *device_id = key; -+ device_properties_t *props = value; -+ struct timeout_data *timeout = user_data; -+ -+ if (!props->executed[timeout->op->phase] -+ && !props->disallowed[timeout->op->phase]) { -+ timeout->total_timeout += get_device_timeout(timeout->op, -+ timeout->peer, device_id); -+ } - } - - static int --get_peer_timeout(st_query_result_t * peer, int default_timeout) -+get_peer_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer) - { -- int total_timeout = 0; -+ struct timeout_data timeout; - -- GListPtr cur = NULL; -+ timeout.op = op; -+ timeout.peer = peer; -+ timeout.total_timeout = 0; - -- for (cur = peer->device_list; cur; cur = cur->next) { -- total_timeout += get_device_timeout(peer, cur->data, default_timeout); -- } -+ g_hash_table_foreach(peer->devices, add_device_timeout, &timeout); - -- return total_timeout ? total_timeout : default_timeout; -+ return (timeout.total_timeout? 
timeout.total_timeout : op->base_timeout); - } - - static int --get_op_total_timeout(remote_fencing_op_t * op, st_query_result_t * chosen_peer, int default_timeout) -+get_op_total_timeout(const remote_fencing_op_t *op, -+ const st_query_result_t *chosen_peer) - { - int total_timeout = 0; - stonith_topology_t *tp = find_topology_for_host(op->target); -@@ -977,11 +1262,11 @@ get_op_total_timeout(remote_fencing_op_t * op, st_query_result_t * chosen_peer, - } - for (device_list = tp->levels[i]; device_list; device_list = device_list->next) { - for (iter = op->query_results; iter != NULL; iter = iter->next) { -- st_query_result_t *peer = iter->data; -+ const st_query_result_t *peer = iter->data; - -- if (g_list_find_custom(peer->device_list, device_list->data, sort_strings)) { -- total_timeout += -- get_device_timeout(peer, device_list->data, default_timeout); -+ if (find_peer_device(op, peer, device_list->data)) { -+ total_timeout += get_device_timeout(op, peer, -+ device_list->data); - break; - } - } /* End Loop3: match device with peer that owns device, find device's timeout period */ -@@ -989,12 +1274,12 @@ get_op_total_timeout(remote_fencing_op_t * op, st_query_result_t * chosen_peer, - } /*End Loop1: iterate through fencing levels */ - - } else if (chosen_peer) { -- total_timeout = get_peer_timeout(chosen_peer, default_timeout); -+ total_timeout = get_peer_timeout(op, chosen_peer); - } else { -- total_timeout = default_timeout; -+ total_timeout = op->base_timeout; - } - -- return total_timeout ? total_timeout : default_timeout; -+ return total_timeout ? total_timeout : op->base_timeout; - } - - static void -@@ -1049,6 +1334,55 @@ report_timeout_period(remote_fencing_op_t * op, int op_timeout) - } - } - -+/* -+ * \internal -+ * \brief Advance an operation to the next device in its topology -+ * -+ * \param[in,out] op Operation to advance -+ * \param[in] device ID of device just completed -+ * \param[in] msg XML reply that contained device result (if available) -+ * \param[in] rc Return code of device's execution -+ */ -+static void -+advance_op_topology(remote_fencing_op_t *op, const char *device, xmlNode *msg, -+ int rc) -+{ -+ /* Advance to the next device at this topology level, if any */ -+ if (op->devices) { -+ op->devices = op->devices->next; -+ } -+ -+ /* If this device was required, it's not anymore */ -+ remove_required_device(op, device); -+ -+ /* If there are no more devices at this topology level, -+ * run through any required devices not already executed -+ */ -+ if (op->devices == NULL) { -+ op->devices = op->required_list[op->phase]; -+ } -+ -+ if ((op->devices == NULL) && (op->phase == st_phase_off)) { -+ /* We're done with this level and with required devices, but we had -+ * remapped "reboot" to "off", so start over with "on". If any devices -+ * need to be turned back on, op->devices will be non-NULL after this. 
-+ */ -+ op_phase_on(op); -+ } -+ -+ if (op->devices) { -+ /* Necessary devices remain, so execute the next one */ -+ crm_trace("Next for %s on behalf of %s@%s (rc was %d)", -+ op->target, op->originator, op->client_name, rc); -+ call_remote_stonith(op, NULL); -+ } else { -+ /* We're done with all devices and phases, so finalize operation */ -+ crm_trace("Marking complex fencing op for %s as complete", op->target); -+ op->state = st_done; -+ remote_op_done(op, msg, rc, FALSE); -+ } -+} -+ - void - call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer) - { -@@ -1061,7 +1395,7 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer) - } - - if (!op->op_timer_total) { -- int total_timeout = get_op_total_timeout(op, peer, op->base_timeout); -+ int total_timeout = get_op_total_timeout(op, peer); - - op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * total_timeout; - op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op); -@@ -1071,13 +1405,13 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer) - } - - if (is_set(op->call_options, st_opt_topology) && op->devices) { -- /* Ignore any preference, they might not have the device we need */ -- /* When using topology, the stonith_choose_peer function pops off -- * the peer from the op's query results. Make sure to calculate -- * the op_timeout before calling this function when topology is in use */ -+ /* Ignore any peer preference, they might not have the device we need */ -+ /* When using topology, stonith_choose_peer() removes the device from -+ * further consideration, so be sure to calculate timeout beforehand */ - peer = stonith_choose_peer(op); -+ - device = op->devices->data; -- timeout = get_device_timeout(peer, device, op->base_timeout); -+ timeout = get_device_timeout(op, peer, device); - } - - if (peer) { -@@ -1094,15 +1428,15 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer) - crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options); - - if (device) { -- timeout_one = -- TIMEOUT_MULTIPLY_FACTOR * get_device_timeout(peer, device, op->base_timeout); -+ timeout_one = TIMEOUT_MULTIPLY_FACTOR * -+ get_device_timeout(op, peer, device); - crm_info("Requesting that %s perform op %s %s with %s for %s (%ds)", peer->host, - op->action, op->target, device, op->client_name, timeout_one); - crm_xml_add(remote_op, F_STONITH_DEVICE, device); - crm_xml_add(remote_op, F_STONITH_MODE, "slave"); - - } else { -- timeout_one = TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(peer, op->base_timeout); -+ timeout_one = TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer); - crm_info("Requesting that %s perform op %s %s for %s (%ds, %ds)", - peer->host, op->action, op->target, op->client_name, timeout_one, stonith_watchdog_timeout_ms); - crm_xml_add(remote_op, F_STONITH_MODE, "smart"); -@@ -1115,16 +1449,18 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer) - } - - if(stonith_watchdog_timeout_ms > 0 && device && safe_str_eq(device, "watchdog")) { -- crm_notice("Waiting %ds for %s to self-terminate for %s.%.8s (%p)", -- stonith_watchdog_timeout_ms/1000, op->target, op->client_name, op->id, device); -+ crm_notice("Waiting %ds for %s to self-fence (%s) for %s.%.8s (%p)", -+ stonith_watchdog_timeout_ms/1000, op->target, -+ op->action, op->client_name, op->id, device); - op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); - -- /* TODO: We should probably look into peer->device_list to verify watchdog 
is going to be in use */
-+ /* TODO check devices to verify watchdog will be in use */
- } else if(stonith_watchdog_timeout_ms > 0
- && safe_str_eq(peer->host, op->target)
- && safe_str_neq(op->action, "on")) {
-- crm_notice("Waiting %ds for %s to self-terminate for %s.%.8s (%p)",
-- stonith_watchdog_timeout_ms/1000, op->target, op->client_name, op->id, device);
-+ crm_notice("Waiting %ds for %s to self-fence (%s) for %s.%.8s (%p)",
-+ stonith_watchdog_timeout_ms/1000, op->target,
-+ op->action, op->client_name, op->id, device);
- op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
- } else {
-@@ -1137,13 +1473,23 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer)
- free_xml(remote_op);
- return;
-
-+ } else if (op->phase == st_phase_on) {
-+ /* A remapped "on" cannot be executed, but the node was already
-+ * turned off successfully, so ignore the error and continue.
-+ */
-+ crm_warn("Ignoring %s 'on' failure (no capable peers) for %s after successful 'off'",
-+ device, op->target);
-+ advance_op_topology(op, device, NULL, pcmk_ok);
-+ return;
-+
- } else if (op->owner == FALSE) {
-- crm_err("The termination of %s for %s is not ours to control", op->target, op->client_name);
-+ crm_err("Fencing (%s) of %s for %s is not ours to control",
-+ op->action, op->target, op->client_name);
-
- } else if (op->query_timer == 0) {
- /* We've exhausted all available peers */
-- crm_info("No remaining peers capable of terminating %s for %s (%d)", op->target,
-- op->client_name, op->state);
-+ crm_info("No remaining peers capable of fencing (%s) %s for %s (%d)",
-+ op->action, op->target, op->client_name, op->state);
- CRM_LOG_ASSERT(op->state < st_done);
- remote_op_timeout(op);
-
-@@ -1153,33 +1499,37 @@ call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer)
- /* if the operation never left the query state,
- * but we have all the expected replies, then no devices
- * are available to execute the fencing operation.
*/ -+ - if(stonith_watchdog_timeout_ms && (device == NULL || safe_str_eq(device, "watchdog"))) { -- crm_notice("Waiting %ds for %s to self-terminate for %s.%.8s (%p)", -- stonith_watchdog_timeout_ms/1000, op->target, op->client_name, op->id, device); -+ crm_notice("Waiting %ds for %s to self-fence (%s) for %s.%.8s (%p)", -+ stonith_watchdog_timeout_ms/1000, op->target, -+ op->action, op->client_name, op->id, device); - - op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op); - return; - } - - if (op->state == st_query) { -- crm_info("None of the %d peers have devices capable of terminating %s for %s (%d)", -- op->replies, op->target, op->client_name, op->state); -+ crm_info("None of the %d peers have devices capable of fencing (%s) %s for %s (%d)", -+ op->replies, op->action, op->target, op->client_name, -+ op->state); - - rc = -ENODEV; - } else { -- crm_info("None of the %d peers are capable of terminating %s for %s (%d)", -- op->replies, op->target, op->client_name, op->state); -+ crm_info("None of the %d peers are capable of fencing (%s) %s for %s (%d)", -+ op->replies, op->action, op->target, op->client_name, -+ op->state); - } - - op->state = st_failed; - remote_op_done(op, NULL, rc, FALSE); - - } else if (device) { -- crm_info("Waiting for additional peers capable of terminating %s with %s for %s.%.8s", -- op->target, device, op->client_name, op->id); -+ crm_info("Waiting for additional peers capable of fencing (%s) %s with %s for %s.%.8s", -+ op->action, op->target, device, op->client_name, op->id); - } else { -- crm_info("Waiting for additional peers capable of terminating %s for %s%.8s", -- op->target, op->client_name, op->id); -+ crm_info("Waiting for additional peers capable of fencing (%s) %s for %s%.8s", -+ op->action, op->target, op->client_name, op->id); - } - } - -@@ -1200,7 +1550,7 @@ sort_peers(gconstpointer a, gconstpointer b) - const st_query_result_t *peer_a = a; - const st_query_result_t *peer_b = b; - -- return (peer_b->devices - peer_a->devices); -+ return (peer_b->ndevices - peer_a->ndevices); - } - - /*! 
-@@ -1212,7 +1562,7 @@ all_topology_devices_found(remote_fencing_op_t * op)
- {
- GListPtr device = NULL;
- GListPtr iter = NULL;
-- GListPtr match = NULL;
-+ device_properties_t *match = NULL;
- stonith_topology_t *tp = NULL;
- gboolean skip_target = FALSE;
- int i;
-@@ -1236,7 +1586,7 @@ all_topology_devices_found(remote_fencing_op_t * op)
- if (skip_target && safe_str_eq(peer->host, op->target)) {
- continue;
- }
-- match = g_list_find_custom(peer->device_list, device->data, sort_strings);
-+ match = find_peer_device(op, peer, device->data);
- }
- if (!match) {
- return FALSE;
-@@ -1247,10 +1597,169 @@ all_topology_devices_found(remote_fencing_op_t * op)
- return TRUE;
- }
-
-+/*
-+ * \internal
-+ * \brief Parse action-specific device properties from XML
-+ *
-+ * \param[in] xml XML element containing the properties
-+ * \param[in] peer Name of peer that sent XML (for logs)
-+ * \param[in] device Device ID (for logs)
-+ * \param[in] action Action the properties relate to (for logs)
-+ * \param[in,out] op Operation that the device properties are for
-+ * \param[in] phase Phase the properties relate to
-+ * \param[in,out] props Device properties to update
-+ */
-+static void
-+parse_action_specific(xmlNode *xml, const char *peer, const char *device,
-+ const char *action, remote_fencing_op_t *op,
-+ enum st_remap_phase phase, device_properties_t *props)
-+{
-+ int required;
-+
-+ props->custom_action_timeout[phase] = 0;
-+ crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT,
-+ &props->custom_action_timeout[phase]);
-+ if (props->custom_action_timeout[phase]) {
-+ crm_trace("Peer %s with device %s returned %s action timeout %d",
-+ peer, device, action, props->custom_action_timeout[phase]);
-+ }
-+
-+ props->delay_max[phase] = 0;
-+ crm_element_value_int(xml, F_STONITH_DELAY_MAX, &props->delay_max[phase]);
-+ if (props->delay_max[phase]) {
-+ crm_trace("Peer %s with device %s returned maximum of random delay %d for %s",
-+ peer, device, props->delay_max[phase], action);
-+ }
-+
-+ required = 0;
-+ crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
-+ if (required) {
-+ /* If the action is marked as required, add the device to the
-+ * operation's list of required devices for this phase. We use this
-+ * for unfencing when executing a topology. In phase 0 (requested
-+ * action) or phase 1 (remapped "off"), required devices get executed
-+ * regardless of their topology level; in phase 2 (remapped "on"),
-+ * required devices are not attempted, because the cluster will
-+ * execute them automatically later.
-+ */
-+ crm_trace("Peer %s requires device %s to execute for action %s",
-+ peer, device, action);
-+ add_required_device(op, phase, device);
-+ }
-+
-+ /* If a reboot is remapped to off+on, it's possible that a node is allowed
-+ * to perform one action but not another.
-+ */ -+ if (crm_is_true(crm_element_value(xml, F_STONITH_ACTION_DISALLOWED))) { -+ props->disallowed[phase] = TRUE; -+ crm_trace("Peer %s is disallowed from executing %s for device %s", -+ peer, action, device); -+ } -+} -+ -+/* -+ * \internal -+ * \brief Parse one device's properties from peer's XML query reply -+ * -+ * \param[in] xml XML node containing device properties -+ * \param[in,out] op Operation that query and reply relate to -+ * \param[in,out] result Peer's results -+ * \param[in] device ID of device being parsed -+ */ -+static void -+add_device_properties(xmlNode *xml, remote_fencing_op_t *op, -+ st_query_result_t *result, const char *device) -+{ -+ xmlNode *child; -+ int verified = 0; -+ device_properties_t *props = calloc(1, sizeof(device_properties_t)); -+ -+ /* Add a new entry to this result's devices list */ -+ CRM_ASSERT(props != NULL); -+ g_hash_table_insert(result->devices, strdup(device), props); -+ -+ /* Peers with verified (monitored) access will be preferred */ -+ crm_element_value_int(xml, F_STONITH_DEVICE_VERIFIED, &verified); -+ if (verified) { -+ crm_trace("Peer %s has confirmed a verified device %s", -+ result->host, device); -+ props->verified = TRUE; -+ } -+ -+ /* Parse action-specific device properties */ -+ parse_action_specific(xml, result->host, device, op_requested_action(op), -+ op, st_phase_requested, props); -+ for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) { -+ /* Replies for "reboot" operations will include the action-specific -+ * values for "off" and "on" in child elements, just in case the reboot -+ * winds up getting remapped. -+ */ -+ if (safe_str_eq(ID(child), "off")) { -+ parse_action_specific(child, result->host, device, "off", -+ op, st_phase_off, props); -+ } else if (safe_str_eq(ID(child), "on")) { -+ parse_action_specific(child, result->host, device, "on", -+ op, st_phase_on, props); -+ } -+ } -+} -+ -+/* -+ * \internal -+ * \brief Parse a peer's XML query reply and add it to operation's results -+ * -+ * \param[in,out] op Operation that query and reply relate to -+ * \param[in] host Name of peer that sent this reply -+ * \param[in] ndevices Number of devices expected in reply -+ * \param[in] xml XML node containing device list -+ * -+ * \return Newly allocated result structure with parsed reply -+ */ -+static st_query_result_t * -+add_result(remote_fencing_op_t *op, const char *host, int ndevices, xmlNode *xml) -+{ -+ st_query_result_t *result = calloc(1, sizeof(st_query_result_t)); -+ xmlNode *child; -+ -+ CRM_CHECK(result != NULL, return NULL); -+ result->host = strdup(host); -+ result->devices = g_hash_table_new_full(crm_str_hash, g_str_equal, free, free); -+ -+ /* Each child element describes one capable device available to the peer */ -+ for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) { -+ const char *device = ID(child); -+ -+ if (device) { -+ add_device_properties(child, op, result, device); -+ } -+ } -+ -+ result->ndevices = g_hash_table_size(result->devices); -+ CRM_CHECK(ndevices == result->ndevices, -+ crm_err("Query claimed to have %d devices but %d found", -+ ndevices, result->ndevices)); -+ -+ op->query_results = g_list_insert_sorted(op->query_results, result, sort_peers); -+ return result; -+} -+ -+/* -+ * \internal -+ * \brief Handle a peer's reply to our fencing query -+ * -+ * Parse a query result from XML and store it in the remote operation -+ * table, and when enough replies have been received, issue a fencing request. 
-+ * -+ * \param[in] msg XML reply received -+ * -+ * \return pcmk_ok on success, -errno on error -+ * -+ * \note See initiate_remote_stonith_op() for how the XML query was initially -+ * formed, and stonith_query() for how the peer formed its XML reply. -+ */ - int - process_remote_stonith_query(xmlNode * msg) - { -- int devices = 0; -+ int ndevices = 0; - gboolean host_is_target = FALSE; - gboolean have_all_replies = FALSE; - const char *id = NULL; -@@ -1259,7 +1768,6 @@ process_remote_stonith_query(xmlNode * msg) - st_query_result_t *result = NULL; - uint32_t replies_expected; - xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR); -- xmlNode *child = NULL; - - CRM_CHECK(dev != NULL, return -EPROTO); - -@@ -1268,7 +1776,7 @@ process_remote_stonith_query(xmlNode * msg) - - dev = get_xpath_object("//@" F_STONITH_AVAILABLE_DEVICES, msg, LOG_ERR); - CRM_CHECK(dev != NULL, return -EPROTO); -- crm_element_value_int(dev, F_STONITH_AVAILABLE_DEVICES, &devices); -+ crm_element_value_int(dev, F_STONITH_AVAILABLE_DEVICES, &ndevices); - - op = g_hash_table_lookup(remote_op_list, id); - if (op == NULL) { -@@ -1283,75 +1791,13 @@ process_remote_stonith_query(xmlNode * msg) - host = crm_element_value(msg, F_ORIG); - host_is_target = safe_str_eq(host, op->target); - -- if (devices <= 0) { -- /* If we're doing 'known' then we might need to fire anyway */ -- crm_trace("Query result %d of %d from %s for %s/%s (%d devices) %s", -- op->replies, replies_expected, host, -- op->target, op->action, devices, id); -- if (have_all_replies) { -- crm_info("All query replies have arrived, continuing (%d expected/%d received for id %s)", -- replies_expected, op->replies, id); -- call_remote_stonith(op, NULL); -- } -- return pcmk_ok; -- } -- - crm_info("Query result %d of %d from %s for %s/%s (%d devices) %s", - op->replies, replies_expected, host, -- op->target, op->action, devices, id); -- result = calloc(1, sizeof(st_query_result_t)); -- result->host = strdup(host); -- result->devices = devices; -- result->custom_action_timeouts = g_hash_table_new_full(crm_str_hash, g_str_equal, free, NULL); -- result->delay_maxes = g_hash_table_new_full(crm_str_hash, g_str_equal, free, NULL); -- result->verified_devices = g_hash_table_new_full(crm_str_hash, g_str_equal, free, NULL); -- -- for (child = __xml_first_child(dev); child != NULL; child = __xml_next(child)) { -- const char *device = ID(child); -- int action_timeout = 0; -- int delay_max = 0; -- int verified = 0; -- int required = 0; -- -- if (device) { -- result->device_list = g_list_prepend(result->device_list, strdup(device)); -- crm_element_value_int(child, F_STONITH_ACTION_TIMEOUT, &action_timeout); -- crm_element_value_int(child, F_STONITH_DELAY_MAX, &delay_max); -- crm_element_value_int(child, F_STONITH_DEVICE_VERIFIED, &verified); -- crm_element_value_int(child, F_STONITH_DEVICE_REQUIRED, &required); -- if (action_timeout) { -- crm_trace("Peer %s with device %s returned action timeout %d", -- result->host, device, action_timeout); -- g_hash_table_insert(result->custom_action_timeouts, -- strdup(device), GINT_TO_POINTER(action_timeout)); -- } -- if (delay_max > 0) { -- crm_trace("Peer %s with device %s returned maximum of random delay %d", -- result->host, device, delay_max); -- g_hash_table_insert(result->delay_maxes, -- strdup(device), GINT_TO_POINTER(delay_max)); -- } -- if (verified) { -- crm_trace("Peer %s has confirmed a verified device %s", result->host, device); -- g_hash_table_insert(result->verified_devices, -- strdup(device), 
GINT_TO_POINTER(verified)); -- } -- if (required) { -- crm_trace("Peer %s requires device %s to execute for action %s", -- result->host, device, op->action); -- /* This matters when executing a topology. Required devices will get -- * executed regardless of their topology level. We use this for unfencing. */ -- add_required_device(op, device); -- } -- } -+ op->target, op->action, ndevices, id); -+ if (ndevices > 0) { -+ result = add_result(op, host, ndevices, dev); - } - -- CRM_CHECK(devices == g_list_length(result->device_list), -- crm_err("Mis-match: Query claimed to have %d devices but %d found", devices, -- g_list_length(result->device_list))); -- -- op->query_results = g_list_insert_sorted(op->query_results, result, sort_peers); -- - if (is_set(op->call_options, st_opt_topology)) { - /* If we start the fencing before all the topology results are in, - * it is possible fencing levels will be skipped because of the missing -@@ -1368,11 +1814,13 @@ process_remote_stonith_query(xmlNode * msg) - } - - } else if (op->state == st_query) { -+ int nverified = count_peer_devices(op, result, TRUE); -+ - /* We have a result for a non-topology fencing op that looks promising, - * go ahead and start fencing before query timeout */ -- if (host_is_target == FALSE && g_hash_table_size(result->verified_devices)) { -+ if (result && (host_is_target == FALSE) && nverified) { - /* we have a verified device living on a peer that is not the target */ -- crm_trace("Found %d verified devices", g_hash_table_size(result->verified_devices)); -+ crm_trace("Found %d verified devices", nverified); - call_remote_stonith(op, result); - - } else if (have_all_replies) { -@@ -1384,14 +1832,25 @@ process_remote_stonith_query(xmlNode * msg) - crm_trace("Waiting for more peer results before launching fencing operation"); - } - -- } else if (op->state == st_done) { -+ } else if (result && (op->state == st_done)) { - crm_info("Discarding query result from %s (%d devices): Operation is in state %d", -- result->host, result->devices, op->state); -+ result->host, result->ndevices, op->state); - } - - return pcmk_ok; - } - -+/* -+ * \internal -+ * \brief Handle a peer's reply to a fencing request -+ * -+ * Parse a fencing reply from XML, and either finalize the operation -+ * or attempt another device as appropriate. -+ * -+ * \param[in] msg XML reply received -+ * -+ * \return pcmk_ok on success, -errno on error -+ */ - int - process_remote_stonith_exec(xmlNode * msg) - { -@@ -1472,26 +1931,20 @@ process_remote_stonith_exec(xmlNode * msg) - return rc; - } - -- /* An operation completed succesfully but has not yet been marked as done. -- * Continue the topology if more devices exist at the current level, otherwise -- * mark as done. */ -+ if ((op->phase == 2) && (rc != pcmk_ok)) { -+ /* A remapped "on" failed, but the node was already turned off -+ * successfully, so ignore the error and continue. -+ */ -+ crm_warn("Ignoring %s 'on' failure (exit code %d) for %s after successful 'off'", -+ device, rc, op->target); -+ rc = pcmk_ok; -+ } -+ - if (rc == pcmk_ok) { -- GListPtr required_match = g_list_find_custom(op->required_list, device, sort_strings); -- if (op->devices) { -- /* Success, are there any more? 
*/ -- op->devices = op->devices->next; -- } -- if (required_match) { -- op->required_list = g_list_remove(op->required_list, required_match->data); -- } -- /* if no more devices at this fencing level, we are done, -- * else we need to contine with executing the next device in the list */ -- if (op->devices == NULL) { -- crm_trace("Marking complex fencing op for %s as complete", op->target); -- op->state = st_done; -- remote_op_done(op, msg, rc, FALSE); -- return rc; -- } -+ /* An operation completed successfully. Try another device if -+ * necessary, otherwise mark the operation as done. */ -+ advance_op_topology(op, device, msg, rc); -+ return rc; - } else { - /* This device failed, time to try another topology level. If no other - * levels are available, mark this operation as failed and report results. */ -@@ -1516,7 +1969,7 @@ process_remote_stonith_exec(xmlNode * msg) - /* fall-through and attempt other fencing action using another peer */ - } - -- /* Retry on failure or execute the rest of the topology */ -+ /* Retry on failure */ - crm_trace("Next for %s on behalf of %s@%s (rc was %d)", op->target, op->originator, - op->client_name, rc); - call_remote_stonith(op, NULL); -@@ -1595,6 +2048,9 @@ stonith_check_fence_tolerance(int tolerance, const char *target, const char *act - continue; - } else if (rop->state != st_done) { - continue; -+ /* We don't have to worry about remapped reboots here -+ * because if state is done, any remapping has been undone -+ */ - } else if (strcmp(rop->action, action) != 0) { - continue; - } else if ((rop->completed + tolerance) < now) { -diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h -index a6f58b1..a59151b 100644 ---- a/include/crm/fencing/internal.h -+++ b/include/crm/fencing/internal.h -@@ -63,6 +63,8 @@ xmlNode *create_device_registration_xml(const char *id, const char *namespace, c - # define F_STONITH_TOLERANCE "st_tolerance" - /*! Action specific timeout period returned in query of fencing devices. */ - # define F_STONITH_ACTION_TIMEOUT "st_action_timeout" -+/*! Host in query result is not allowed to run this action */ -+# define F_STONITH_ACTION_DISALLOWED "st_action_disallowed" - /*! Maximum of random fencing delay for a device */ - # define F_STONITH_DELAY_MAX "st_delay_max" - /*! Has this device been verified using a monitor type -diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h -index e3a0d63..730cad3 100644 ---- a/include/crm/lrmd.h -+++ b/include/crm/lrmd.h -@@ -200,8 +200,6 @@ typedef struct lrmd_event_data_s { - enum ocf_exitcode rc; - /*! The lrmd status returned for exec_complete events */ - int op_status; -- /*! exit failure reason string from resource agent operation */ -- const char *exit_reason; - /*! stdout from resource agent operation */ - const char *output; - /*! Timestamp of when op ran */ -@@ -226,6 +224,9 @@ typedef struct lrmd_event_data_s { - * to the proper client. */ - const char *remote_nodename; - -+ /*! 
exit failure reason string from resource agent operation */ -+ const char *exit_reason; -+ - } lrmd_event_data_t; - - lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t * event); -diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h -index 4bfa3fe..4214959 100644 ---- a/include/crm/pengine/status.h -+++ b/include/crm/pengine/status.h -@@ -137,10 +137,6 @@ struct node_shared_s { - gboolean shutdown; - gboolean expected_up; - gboolean is_dc; -- gboolean rsc_discovery_enabled; -- -- gboolean remote_requires_reset; -- gboolean remote_was_fenced; - - int num_resources; - GListPtr running_rsc; /* resource_t* */ -@@ -157,14 +153,17 @@ struct node_shared_s { - GHashTable *digest_cache; - - gboolean maintenance; -+ gboolean rsc_discovery_enabled; -+ gboolean remote_requires_reset; -+ gboolean remote_was_fenced; - }; - - struct node_s { - int weight; - gboolean fixed; -- int rsc_discover_mode; - int count; - struct node_shared_s *details; -+ int rsc_discover_mode; - }; - - # include -@@ -262,7 +261,6 @@ struct resource_s { - int migration_threshold; - - gboolean is_remote_node; -- gboolean exclusive_discover; - - unsigned long long flags; - -@@ -296,6 +294,7 @@ struct resource_s { - char *pending_task; - - const char *isolation_wrapper; -+ gboolean exclusive_discover; - }; - - struct pe_action_s { -diff --git a/lib/cib/cib_ops.c b/lib/cib/cib_ops.c -index 5f73559..8966ae2 100644 ---- a/lib/cib/cib_ops.c -+++ b/lib/cib/cib_ops.c -@@ -373,7 +373,10 @@ cib_process_modify(const char *op, int options, const char *section, xmlNode * r - - for (lpc = 0; lpc < max; lpc++) { - xmlNode *match = getXpathResult(xpathObj, lpc); -- crm_debug("Destroying %s", (char *)xmlGetNodePath(match)); -+ xmlChar *match_path = xmlGetNodePath(match); -+ -+ crm_debug("Destroying %s", match_path); -+ free(match_path); - free_xml(match); - } - -diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c -index 28b8e81..d321517 100644 ---- a/lib/cib/cib_utils.c -+++ b/lib/cib/cib_utils.c -@@ -533,7 +533,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer - int current_schema = get_schema_version(schema); - - if (minimum_schema == 0) { -- minimum_schema = get_schema_version("pacemaker-1.1"); -+ minimum_schema = get_schema_version("pacemaker-1.2"); - } - - /* Does the CIB support the "update-*" attributes... */ -diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c -index 28f41cb..b7958eb 100644 ---- a/lib/cluster/membership.c -+++ b/lib/cluster/membership.c -@@ -734,6 +734,14 @@ crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const - if (crm_status_callback) { - crm_status_callback(crm_status_processes, node, &last); - } -+ -+ /* The client callback shouldn't touch the peer caches, -+ * but as a safety net, bail if the peer cache was destroyed. -+ */ -+ if (crm_peer_cache == NULL) { -+ return NULL; -+ } -+ - if (crm_autoreap) { - node = crm_update_peer_state(__FUNCTION__, node, - is_set(node->processes, crm_get_cluster_proc())? 
-diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am -index f5c0766..a593f40 100644 ---- a/lib/common/Makefile.am -+++ b/lib/common/Makefile.am -@@ -37,7 +37,7 @@ if BUILD_CIBSECRETS - libcrmcommon_la_SOURCES += cib_secrets.c - endif - --libcrmcommon_la_LDFLAGS = -version-info 8:0:5 -+libcrmcommon_la_LDFLAGS = -version-info 7:0:4 - libcrmcommon_la_LIBADD = @LIBADD_DL@ $(GNUTLSLIBS) - libcrmcommon_la_SOURCES += $(top_builddir)/lib/gnu/md5.c - -diff --git a/lib/common/xml.c b/lib/common/xml.c -index e272049..8eed245 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -3430,12 +3430,18 @@ dump_xml_attr(xmlAttrPtr attr, int options, char **buffer, int *offset, int *max - { - char *p_value = NULL; - const char *p_name = NULL; -+ xml_private_t *p = NULL; - - CRM_ASSERT(buffer != NULL); - if (attr == NULL || attr->children == NULL) { - return; - } - -+ p = attr->_private; -+ if (p && is_set(p->flags, xpf_deleted)) { -+ return; -+ } -+ - p_name = (const char *)attr->name; - p_value = crm_xml_escape((const char *)attr->children->content); - buffer_print(*buffer, *max, *offset, " %s=\"%s\"", p_name, p_value); -@@ -3812,6 +3818,10 @@ dump_xml_comment(xmlNode * data, int options, char **buffer, int *offset, int *m - void - crm_xml_dump(xmlNode * data, int options, char **buffer, int *offset, int *max, int depth) - { -+ if(data == NULL) { -+ *offset = 0; -+ *max = 0; -+ } - #if 0 - if (is_not_set(options, xml_log_option_filtered)) { - /* Turning this code on also changes the PE tests for some reason -@@ -4564,6 +4574,8 @@ subtract_xml_object(xmlNode * parent, xmlNode * left, xmlNode * right, - /* changes to name/value pairs */ - for (xIter = crm_first_attr(left); xIter != NULL; xIter = xIter->next) { - const char *prop_name = (const char *)xIter->name; -+ xmlAttrPtr right_attr = NULL; -+ xml_private_t *p = NULL; - - if (strcmp(prop_name, XML_ATTR_ID) == 0) { - continue; -@@ -4582,8 +4594,13 @@ subtract_xml_object(xmlNode * parent, xmlNode * left, xmlNode * right, - continue; - } - -+ right_attr = xmlHasProp(right, (const xmlChar *)prop_name); -+ if (right_attr) { -+ p = right_attr->_private; -+ } -+ - right_val = crm_element_value(right, prop_name); -- if (right_val == NULL) { -+ if (right_val == NULL || (p && is_set(p->flags, xpf_deleted))) { - /* new */ - *changed = TRUE; - if (full) { -diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c -index 80f0064..67114c2 100644 ---- a/lib/fencing/st_client.c -+++ b/lib/fencing/st_client.c -@@ -1100,57 +1100,62 @@ stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *a - if (safe_str_eq(provider, "redhat")) { - stonith_action_t *action = stonith_action_create(agent, "metadata", NULL, 0, 5, NULL, NULL); - int exec_rc = stonith_action_execute(action, &rc, &buffer); -+ xmlNode *xml = NULL; -+ xmlNode *actions = NULL; -+ xmlXPathObject *xpathObj = NULL; - - if (exec_rc < 0 || rc != 0 || buffer == NULL) { -+ crm_warn("Could not obtain metadata for %s", agent); - crm_debug("Query failed: %d %d: %s", exec_rc, rc, crm_str(buffer)); - free(buffer); /* Just in case */ - return -EINVAL; -+ } - -- } else { -- -- xmlNode *xml = string2xml(buffer); -- xmlNode *actions = NULL; -- xmlXPathObject *xpathObj = NULL; -+ xml = string2xml(buffer); -+ if(xml == NULL) { -+ crm_warn("Metadata for %s is invalid", agent); -+ free(buffer); -+ return -EINVAL; -+ } - -- xpathObj = xpath_search(xml, "//actions"); -- if (numXpathResults(xpathObj) > 0) { -- actions = getXpathResult(xpathObj, 0); -- } -+ xpathObj = xpath_search(xml, 
"//actions"); -+ if (numXpathResults(xpathObj) > 0) { -+ actions = getXpathResult(xpathObj, 0); -+ } - -- freeXpathObject(xpathObj); -+ freeXpathObject(xpathObj); - -- /* Now fudge the metadata so that the start/stop actions appear */ -- xpathObj = xpath_search(xml, "//action[@name='stop']"); -- if (numXpathResults(xpathObj) <= 0) { -- xmlNode *tmp = NULL; -+ /* Now fudge the metadata so that the start/stop actions appear */ -+ xpathObj = xpath_search(xml, "//action[@name='stop']"); -+ if (numXpathResults(xpathObj) <= 0) { -+ xmlNode *tmp = NULL; - -- tmp = create_xml_node(actions, "action"); -- crm_xml_add(tmp, "name", "stop"); -- crm_xml_add(tmp, "timeout", "20s"); -+ tmp = create_xml_node(actions, "action"); -+ crm_xml_add(tmp, "name", "stop"); -+ crm_xml_add(tmp, "timeout", "20s"); - -- tmp = create_xml_node(actions, "action"); -- crm_xml_add(tmp, "name", "start"); -- crm_xml_add(tmp, "timeout", "20s"); -- } -+ tmp = create_xml_node(actions, "action"); -+ crm_xml_add(tmp, "name", "start"); -+ crm_xml_add(tmp, "timeout", "20s"); -+ } - -- freeXpathObject(xpathObj); -+ freeXpathObject(xpathObj); - -- /* Now fudge the metadata so that the port isn't required in the configuration */ -- xpathObj = xpath_search(xml, "//parameter[@name='port']"); -- if (numXpathResults(xpathObj) > 0) { -- /* We'll fill this in */ -- xmlNode *tmp = getXpathResult(xpathObj, 0); -+ /* Now fudge the metadata so that the port isn't required in the configuration */ -+ xpathObj = xpath_search(xml, "//parameter[@name='port']"); -+ if (numXpathResults(xpathObj) > 0) { -+ /* We'll fill this in */ -+ xmlNode *tmp = getXpathResult(xpathObj, 0); - -- crm_xml_add(tmp, "required", "0"); -- } -+ crm_xml_add(tmp, "required", "0"); -+ } - -- freeXpathObject(xpathObj); -- free(buffer); -- buffer = dump_xml_formatted(xml); -- free_xml(xml); -- if (!buffer) { -- return -EINVAL; -- } -+ freeXpathObject(xpathObj); -+ free(buffer); -+ buffer = dump_xml_formatted(xml); -+ free_xml(xml); -+ if (!buffer) { -+ return -EINVAL; - } - - } else { -@@ -1280,7 +1285,10 @@ stonith_api_query(stonith_t * stonith, int call_options, const char *target, - - CRM_LOG_ASSERT(match != NULL); - if(match != NULL) { -- crm_info("%s[%d] = %s", "//@agent", lpc, xmlGetNodePath(match)); -+ xmlChar *match_path = xmlGetNodePath(match); -+ -+ crm_info("%s[%d] = %s", "//@agent", lpc, match_path); -+ free(match_path); - *devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID)); - } - } -diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am -index e98d1e5..f961ae1 100644 ---- a/lib/lrmd/Makefile.am -+++ b/lib/lrmd/Makefile.am -@@ -25,7 +25,7 @@ AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \ - lib_LTLIBRARIES = liblrmd.la - - liblrmd_la_SOURCES = lrmd_client.c proxy_common.c --liblrmd_la_LDFLAGS = -version-info 3:0:0 -+liblrmd_la_LDFLAGS = -version-info 3:0:2 - liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/services/libcrmservice.la \ - $(top_builddir)/lib/fencing/libstonithd.la -diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am -index 29b7206..78da075 100644 ---- a/lib/pengine/Makefile.am -+++ b/lib/pengine/Makefile.am -@@ -30,7 +30,7 @@ libpe_rules_la_LDFLAGS = -version-info 2:4:0 - libpe_rules_la_SOURCES = rules.c common.c - libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la - --libpe_status_la_LDFLAGS = -version-info 8:0:0 -+libpe_status_la_LDFLAGS = -version-info 8:0:4 - libpe_status_la_SOURCES = status.c unpack.c utils.c complex.c native.c 
group.c clone.c rules.c common.c - libpe_status_la_LIBADD = @CURSESLIBS@ $(top_builddir)/lib/common/libcrmcommon.la - -diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c -index 73c44a8..106c674 100644 ---- a/lib/pengine/unpack.c -+++ b/lib/pengine/unpack.c -@@ -2834,8 +2834,9 @@ static bool check_operation_expiry(resource_t *rsc, node_t *node, int rc, xmlNod - - node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); - if (remote_node && remote_node->details->remote_was_fenced == 0) { -- -- crm_info("Waiting to clear monitor failure for remote node %s until fencing has occured", rsc->id); -+ if (strstr(ID(xml_op), "last_failure")) { -+ crm_info("Waiting to clear monitor failure for remote node %s until fencing has occured", rsc->id); -+ } - /* disabling failure timeout for this operation because we believe - * fencing of the remote node should occur first. */ - failure_timeout = 0; -@@ -2866,6 +2867,9 @@ static bool check_operation_expiry(resource_t *rsc, node_t *node, int rc, xmlNod - } else { - expired = FALSE; - } -+ } else if (rsc->remote_reconnect_interval && strstr(ID(xml_op), "last_failure")) { -+ /* always clear last failure when reconnect interval is set */ -+ clear_failcount = 1; - } - } - -diff --git a/lib/services/pcmk-dbus.h b/lib/services/pcmk-dbus.h -index afb8a2a..b9a713b 100644 ---- a/lib/services/pcmk-dbus.h -+++ b/lib/services/pcmk-dbus.h -@@ -1,3 +1,7 @@ -+#ifndef DBUS_TIMEOUT_USE_DEFAULT -+# define DBUS_TIMEOUT_USE_DEFAULT -1 -+#endif -+ - DBusConnection *pcmk_dbus_connect(void); - void pcmk_dbus_connection_setup_with_select(DBusConnection *c); - void pcmk_dbus_disconnect(DBusConnection *connection); -diff --git a/lrmd/lrmd.c b/lrmd/lrmd.c -index bd4d33e..0cf98cc 100644 ---- a/lrmd/lrmd.c -+++ b/lrmd/lrmd.c -@@ -219,6 +219,7 @@ free_lrmd_cmd(lrmd_cmd_t * cmd) - } - free(cmd->origin); - free(cmd->action); -+ free(cmd->real_action); - free(cmd->userdata_str); - free(cmd->rsc_id); - free(cmd->output); -diff --git a/pacemaker.spec.in b/pacemaker.spec.in -index 0e3200f..2dfb4a6 100644 ---- a/pacemaker.spec.in -+++ b/pacemaker.spec.in -@@ -54,7 +54,7 @@ - - Name: pacemaker - Summary: Scalable High-Availability cluster resource manager --Version: 1.1.11 -+Version: 1.1.13 - Release: %{pcmk_release}%{?dist} - License: GPLv2+ and LGPLv2+ - Url: http://www.clusterlabs.org -diff --git a/pengine/Makefile.am b/pengine/Makefile.am -index d14d911..31532cf 100644 ---- a/pengine/Makefile.am -+++ b/pengine/Makefile.am -@@ -61,7 +61,7 @@ endif - noinst_HEADERS = allocate.h utils.h pengine.h - #utils.h pengine.h - --libpengine_la_LDFLAGS = -version-info 8:0:0 -+libpengine_la_LDFLAGS = -version-info 8:0:4 - # -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version - libpengine_la_SOURCES = pengine.c allocate.c utils.c constraints.c - libpengine_la_SOURCES += native.c group.c clone.c master.c graph.c utilization.c -diff --git a/pengine/allocate.c b/pengine/allocate.c -index 4b6fca1..68cafd4 100644 ---- a/pengine/allocate.c -+++ b/pengine/allocate.c -@@ -1681,10 +1681,38 @@ apply_remote_node_ordering(pe_working_set_t *data_set) - resource_t *remote_rsc = NULL; - resource_t *container = NULL; - -+ if (action->rsc == NULL) { -+ continue; -+ } -+ -+ /* Special case. 
*/ -+ if (action->rsc && -+ action->rsc->is_remote_node && -+ safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) { -+ -+ /* if we are clearing the failcount of an actual remote node connect -+ * resource, then make sure this happens before allowing the connection -+ * to start if we are planning on starting the connection during this -+ * transition */ -+ custom_action_order(action->rsc, -+ NULL, -+ action, -+ action->rsc, -+ generate_op_key(action->rsc->id, RSC_START, 0), -+ NULL, -+ pe_order_optional, -+ data_set); -+ -+ continue; -+ } -+ -+ /* detect if the action occurs on a remote node. if so create -+ * ordering constraints that guarantee the action occurs while -+ * the remote node is active (after start, before stop...) things -+ * like that */ - if (action->node == NULL || - is_remote_node(action->node) == FALSE || - action->node->details->remote_rsc == NULL || -- action->rsc == NULL || - is_set(action->flags, pe_action_pseudo)) { - continue; - } -diff --git a/pengine/regression.sh b/pengine/regression.sh -index d57da17..d184798 100755 ---- a/pengine/regression.sh -+++ b/pengine/regression.sh -@@ -566,6 +566,8 @@ do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the mo - do_test colocated-utilization-group "Colocated Utilization - Group" - do_test colocated-utilization-clone "Colocated Utilization - Clone" - -+do_test utilization-check-allowed-nodes "Only check the capacities of the nodes that can run the resource" -+ - echo "" - do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources" - do_test node-maintenance-1 "cl#5128 - Node maintenance" -diff --git a/pengine/test10/utilization-check-allowed-nodes.dot b/pengine/test10/utilization-check-allowed-nodes.dot -new file mode 100644 -index 0000000..d09efbc ---- /dev/null -+++ b/pengine/test10/utilization-check-allowed-nodes.dot -@@ -0,0 +1,19 @@ -+digraph "g" { -+"load_stopped_node1 node1" [ style=bold color="green" fontcolor="orange"] -+"load_stopped_node2 node2" [ style=bold color="green" fontcolor="orange"] -+"probe_complete node1" -> "probe_complete" [ style = bold] -+"probe_complete node1" [ style=bold color="green" fontcolor="black"] -+"probe_complete node2" -> "probe_complete" [ style = bold] -+"probe_complete node2" [ style=bold color="green" fontcolor="black"] -+"probe_complete" -> "rsc1_start_0 node2" [ style = bold] -+"probe_complete" [ style=bold color="green" fontcolor="orange"] -+"rsc1_monitor_0 node1" -> "probe_complete node1" [ style = bold] -+"rsc1_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc1_monitor_0 node2" -> "probe_complete node2" [ style = bold] -+"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black"] -+"rsc2_monitor_0 node1" -> "probe_complete node1" [ style = bold] -+"rsc2_monitor_0 node1" [ style=bold color="green" fontcolor="black"] -+"rsc2_monitor_0 node2" -> "probe_complete node2" [ style = bold] -+"rsc2_monitor_0 node2" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/pengine/test10/utilization-check-allowed-nodes.exp b/pengine/test10/utilization-check-allowed-nodes.exp -new file mode 100644 -index 0000000..134ccb3 ---- /dev/null -+++ b/pengine/test10/utilization-check-allowed-nodes.exp -@@ -0,0 +1,112 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/utilization-check-allowed-nodes.scores b/pengine/test10/utilization-check-allowed-nodes.scores -new file mode 100644 -index 0000000..26887e2 ---- /dev/null -+++ b/pengine/test10/utilization-check-allowed-nodes.scores -@@ -0,0 +1,5 @@ -+Allocation scores: -+native_color: rsc1 allocation score on node1: -INFINITY -+native_color: rsc1 allocation score on node2: 0 -+native_color: rsc2 allocation score on node1: -INFINITY -+native_color: rsc2 allocation score on node2: 0 -diff --git a/pengine/test10/utilization-check-allowed-nodes.summary b/pengine/test10/utilization-check-allowed-nodes.summary -new file mode 100644 -index 0000000..12bf19a ---- /dev/null -+++ b/pengine/test10/utilization-check-allowed-nodes.summary -@@ -0,0 +1,26 @@ -+ -+Current cluster status: -+Online: [ node1 node2 ] -+ -+ rsc1 (ocf::pacemaker:Dummy): Stopped -+ rsc2 (ocf::pacemaker:Dummy): Stopped -+ -+Transition Summary: -+ * Start rsc1 (node2) -+ -+Executing cluster transition: -+ * Resource action: rsc1 monitor on node2 -+ * Resource action: rsc1 monitor on node1 -+ * Resource action: rsc2 monitor on node2 -+ * Resource action: rsc2 monitor on node1 -+ * Pseudo action: probe_complete -+ * Pseudo action: load_stopped_node1 -+ * Pseudo action: load_stopped_node2 -+ * Resource action: rsc1 start on node2 -+ -+Revised cluster status: -+Online: [ node1 node2 ] -+ -+ rsc1 (ocf::pacemaker:Dummy): Started node2 -+ rsc2 (ocf::pacemaker:Dummy): Stopped -+ -diff --git a/pengine/test10/utilization-check-allowed-nodes.xml b/pengine/test10/utilization-check-allowed-nodes.xml -new file mode 100644 -index 0000000..39cf51f ---- /dev/null -+++ b/pengine/test10/utilization-check-allowed-nodes.xml -@@ -0,0 +1,39 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/utilization.c b/pengine/utilization.c -index 982fcc9..db41b21 100644 ---- a/pengine/utilization.c -+++ b/pengine/utilization.c -@@ -344,9 +344,10 @@ process_utilization(resource_t * rsc, node_t ** prefer, pe_working_set_t * data_ - int alloc_details = scores_log_level + 1; - - if (safe_str_neq(data_set->placement_strategy, "default")) { -- GListPtr gIter = NULL; -+ GHashTableIter iter; - GListPtr colocated_rscs = NULL; - gboolean any_capable = FALSE; -+ node_t *node = NULL; - - colocated_rscs = find_colocated_rscs(colocated_rscs, rsc, rsc); - if (colocated_rscs) { -@@ -356,8 +357,11 @@ process_utilization(resource_t * rsc, node_t ** prefer, pe_working_set_t * data_ - - unallocated_utilization = sum_unallocated_utilization(rsc, colocated_rscs); - -- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { -- node_t *node = (node_t *) gIter->data; -+ g_hash_table_iter_init(&iter, rsc->allowed_nodes); -+ while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { -+ if (can_run_resources(node) == FALSE || node->weight < 0) { -+ continue; -+ } - - if (have_enough_capacity(node, rscs_id, unallocated_utilization)) { - any_capable = TRUE; -@@ -371,8 +375,11 @@ process_utilization(resource_t * rsc, node_t ** prefer, pe_working_set_t * data_ - } - - if (any_capable) { -- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { -- node_t *node = (node_t *) gIter->data; -+ g_hash_table_iter_init(&iter, rsc->allowed_nodes); -+ while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { -+ if (can_run_resources(node) == FALSE || node->weight < 0) 
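The process_utilization() change here narrows the search space: instead of walking every node in data_set->nodes, each pass iterates only rsc->allowed_nodes (a GHashTable) and skips candidates that cannot run resources or carry a negative weight. A minimal sketch of that GLib iteration pattern, with node_ok() standing in for the real can_run_resources()/weight checks:

    #include <glib.h>

    /* Placeholder for can_run_resources(node) && node->weight >= 0 */
    static gboolean
    node_ok(gpointer node)
    {
        return node != NULL;
    }

    /* Visit only the nodes a resource may run on, rather than the
     * whole cluster membership. */
    static void
    visit_allowed_nodes(GHashTable *allowed_nodes)
    {
        GHashTableIter iter;
        gpointer node = NULL;

        g_hash_table_iter_init(&iter, allowed_nodes);
        while (g_hash_table_iter_next(&iter, NULL, &node)) {
            if (!node_ok(node)) {
                continue;   /* not a viable candidate */
            }
            /* ...capacity checks against this candidate node... */
        }
    }

Besides being cheaper, this avoids logging capacity complaints for nodes the resource could never be placed on.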
{ -+ continue; -+ } - - if (have_enough_capacity(node, rscs_id, unallocated_utilization) == FALSE) { - pe_rsc_debug(rsc, "Resource %s and its colocated resources cannot be allocated to node %s: no enough capacity", -@@ -394,8 +401,11 @@ process_utilization(resource_t * rsc, node_t ** prefer, pe_working_set_t * data_ - } - - if (any_capable == FALSE) { -- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) { -- node_t *node = (node_t *) gIter->data; -+ g_hash_table_iter_init(&iter, rsc->allowed_nodes); -+ while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) { -+ if (can_run_resources(node) == FALSE || node->weight < 0) { -+ continue; -+ } - - if (have_enough_capacity(node, rsc->id, rsc->utilization) == FALSE) { - pe_rsc_debug(rsc, "Resource %s cannot be allocated to node %s: no enough capacity", -diff --git a/tools/fake_transition.c b/tools/fake_transition.c -index e8c37f7..fe5de95 100644 ---- a/tools/fake_transition.c -+++ b/tools/fake_transition.c -@@ -65,11 +65,14 @@ inject_transient_attr(xmlNode * cib_node, const char *name, const char *value) - xmlNode *attrs = NULL; - xmlNode *container = NULL; - xmlNode *nvp = NULL; -+ xmlChar *node_path; - const char *node_uuid = ID(cib_node); - char *nvp_id = crm_concat(name, node_uuid, '-'); - -- quiet_log("Injecting attribute %s=%s into %s '%s'", name, value, xmlGetNodePath(cib_node), -+ node_path = xmlGetNodePath(cib_node); -+ quiet_log("Injecting attribute %s=%s into %s '%s'", name, value, node_path, - ID(cib_node)); -+ free(node_path); - - attrs = first_named_child(cib_node, XML_TAG_TRANSIENT_NODEATTRS); - if (attrs == NULL) { -diff --git a/valgrind-pcmk.suppressions b/valgrind-pcmk.suppressions -index e7caa55..2e382df 100644 ---- a/valgrind-pcmk.suppressions -+++ b/valgrind-pcmk.suppressions -@@ -20,6 +20,15 @@ - } - - { -+ Another bash leak -+ Memcheck:Leak -+ fun:malloc -+ fun:xmalloc -+ fun:set_default_locale -+ fun:main -+} -+ -+{ - Ignore option parsing - Memcheck:Leak - fun:realloc -@@ -294,4 +303,4 @@ - obj:*/libgobject-* - fun:call_init.part.0 - fun:_dl_init --} -\ No newline at end of file -+} -diff --git a/version.m4 b/version.m4 -index 22faf65..3d5e96b 100644 ---- a/version.m4 -+++ b/version.m4 -@@ -1 +1 @@ --m4_define([VERSION_NUMBER], [1.1.12]) -+m4_define([VERSION_NUMBER], [1.1.13]) diff --git a/SOURCES/pacemaker-rollup-3a7715d.patch b/SOURCES/pacemaker-rollup-3a7715d.patch deleted file mode 100644 index 6b1935c..0000000 --- a/SOURCES/pacemaker-rollup-3a7715d.patch +++ /dev/null @@ -1,4919 +0,0 @@ -diff --git a/attrd/commands.c b/attrd/commands.c -index 18c0523..c6586c7 100644 ---- a/attrd/commands.c -+++ b/attrd/commands.c -@@ -832,7 +832,6 @@ attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *u - } - } - done: -- free(name); - if(a && a->changed && election_state(writer) == election_won) { - write_attribute(a); - } -@@ -1019,8 +1018,10 @@ write_attribute(attribute_t *a) - crm_info("Sent update %d with %d changes for %s, id=%s, set=%s", - a->update, cib_updates, a->id, (a->uuid? 
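The attrd changes beginning here all serve one leak fix: callback user_data is now handed to the CIB library together with a destructor. When the payload is a bare strdup()'d string, plain free suffices (as in the register_callback_full() call just below); when it is a struct with owned members, a dedicated destructor is needed, as free_attrd_callback() in the attrd/legacy.c hunk further down shows. A sketch of that second shape (struct cb_data and free_cb_data are illustrative names):

    #include <stdlib.h>

    /* Compound callback payload: the destructor must release each
     * owned member, then the struct itself. */
    struct cb_data {
        char *attr;
        char *value;
    };

    static void
    free_cb_data(void *user_data)
    {
        struct cb_data *data = user_data;

        free(data->attr);
        free(data->value);
        free(data);
    }

Registering with a destructor means the payload is released on every exit path -- normal completion, early return, cancellation, or timeout -- instead of relying on each callback body to remember to free it.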
a->uuid : ""), a->set); - -- the_cib->cmds->register_callback( -- the_cib, a->update, 120, FALSE, strdup(a->id), "attrd_cib_callback", attrd_cib_callback); -+ the_cib->cmds->register_callback_full(the_cib, a->update, 120, FALSE, -+ strdup(a->id), -+ "attrd_cib_callback", -+ attrd_cib_callback, free); - } - free_xml(xml_top); - } -diff --git a/attrd/legacy.c b/attrd/legacy.c -index 4aae4c4..8a18c38 100644 ---- a/attrd/legacy.c -+++ b/attrd/legacy.c -@@ -635,6 +635,20 @@ struct attrd_callback_s { - char *value; - }; - -+/* -+ * \internal -+ * \brief Free an attrd callback structure -+ */ -+static void -+free_attrd_callback(void *user_data) -+{ -+ struct attrd_callback_s *data = user_data; -+ -+ free(data->attr); -+ free(data->value); -+ free(data); -+} -+ - static void - attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) - { -@@ -646,7 +660,7 @@ attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *u - - } else if (call_id < 0) { - crm_warn("Update %s=%s failed: %s", data->attr, data->value, pcmk_strerror(call_id)); -- goto cleanup; -+ return; - } - - switch (rc) { -@@ -674,10 +688,6 @@ attrd_cib_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *u - crm_err("Update %d for %s=%s failed: %s", - call_id, data->attr, data->value, pcmk_strerror(rc)); - } -- cleanup: -- free(data->value); -- free(data->attr); -- free(data); - } - - void -@@ -749,8 +759,10 @@ attrd_perform_update(attr_hash_entry_t * hash_entry) - if (hash_entry->value != NULL) { - data->value = strdup(hash_entry->value); - } -- cib_conn->cmds->register_callback(cib_conn, rc, 120, FALSE, data, "attrd_cib_callback", -- attrd_cib_callback); -+ cib_conn->cmds->register_callback_full(cib_conn, rc, 120, FALSE, data, -+ "attrd_cib_callback", -+ attrd_cib_callback, -+ free_attrd_callback); - return; - } - -diff --git a/bumplibs.sh b/bumplibs.sh -index 68f2f58..2044efa 100755 ---- a/bumplibs.sh -+++ b/bumplibs.sh -@@ -3,6 +3,7 @@ - declare -A headers - headers[crmcommon]="include/crm/common include/crm/crm.h" - headers[crmcluster]="include/crm/cluster.h" -+headers[crmservice]="include/crm/services.h" - headers[transitioner]="include/crm/transition.h" - headers[cib]="include/crm/cib.h include/crm/cib/util.h" - headers[pe_rules]="include/crm/pengine/rules.h" -@@ -11,8 +12,17 @@ headers[pengine]="include/crm/pengine/common.h include/crm/pengine/complex.h i - headers[stonithd]="include/crm/stonith-ng.h" - headers[lrmd]="include/crm/lrmd.h" - --LAST_RELEASE=`test -e /Volumes || git tag -l | grep Pacemaker | grep -v rc | sort -Vr | head -n 1` --for lib in crmcommon crmcluster transitioner cib pe_rules pe_status stonithd pengine lrmd; do -+if [ ! -z $1 ]; then -+ LAST_RELEASE=$1 -+else -+ LAST_RELEASE=`test -e /Volumes || git tag -l | grep Pacemaker | grep -v rc | sort -Vr | head -n 1` -+fi -+libs=$(find . 
-name "*.am" -exec grep "lib.*_la_LDFLAGS.*version-info" \{\} \; | sed -e s/_la_LDFLAGS.*// -e s/^lib//) -+for lib in $libs; do -+ if [ -z "${headers[$lib]}" ]; then -+ echo "Unknown headers for lib$lib" -+ exit 0 -+ fi - git diff -w $LAST_RELEASE..HEAD ${headers[$lib]} - echo "" - -@@ -27,6 +37,7 @@ for lib in crmcommon crmcluster transitioner cib pe_rules pe_status stonithd pen - fi - - sources=`grep "lib${lib}_la_SOURCES" $am | sed s/.*=// | sed 's:$(top_builddir)/::' | sed 's:$(top_srcdir)/::' | sed 's:\\\::' | sed 's:$(libpe_rules_la_SOURCES):rules.c\ common.c:'` -+ - full_sources="" - for f in $sources; do - if -@@ -48,6 +59,11 @@ for lib in crmcommon crmcluster transitioner cib pe_rules pe_status stonithd pen - echo "" - echo "New arguments to functions or changes to the middle of structs are incompatible additions" - echo "" -+ echo "Where possible:" -+ echo "- move new fields to the end of structs" -+ echo "- use bitfields instead of booleans" -+ echo "- when adding arguments, create new functions that the old version can call" -+ echo "" - read -p "Are the changes to lib$lib: [a]dditions, [i]ncompatible additions, [r]emovals or [f]ixes? [None]: " CHANGE - - git show $LAST_RELEASE:$am | grep version-info -diff --git a/cib/callbacks.c b/cib/callbacks.c -index 1452ded..28844b8 100644 ---- a/cib/callbacks.c -+++ b/cib/callbacks.c -@@ -1570,7 +1570,7 @@ static gboolean - cib_force_exit(gpointer data) - { - crm_notice("Forcing exit!"); -- terminate_cib(__FUNCTION__, TRUE); -+ terminate_cib(__FUNCTION__, -1); - return FALSE; - } - -@@ -1656,7 +1656,7 @@ initiate_exit(void) - - active = crm_active_peers(); - if (active < 2) { -- terminate_cib(__FUNCTION__, FALSE); -+ terminate_cib(__FUNCTION__, 0); - return; - } - -@@ -1675,9 +1675,19 @@ initiate_exit(void) - extern int remote_fd; - extern int remote_tls_fd; - -+/* -+ * \internal -+ * \brief Close remote sockets, free the global CIB and quit -+ * -+ * \param[in] caller Name of calling function (for log message) -+ * \param[in] fast If 1, skip disconnect; if -1, also exit error -+ */ - void --terminate_cib(const char *caller, gboolean fast) -+terminate_cib(const char *caller, int fast) - { -+ crm_info("%s: Exiting%s...", caller, -+ (fast < 0)? " fast" : mainloop ? " from mainloop" : ""); -+ - if (remote_fd > 0) { - close(remote_fd); - remote_fd = 0; -@@ -1687,27 +1697,29 @@ terminate_cib(const char *caller, gboolean fast) - remote_tls_fd = 0; - } - -- if (!fast) { -- crm_info("%s: Disconnecting from cluster infrastructure", caller); -- crm_cluster_disconnect(&crm_cluster); -- } -- - uninitializeCib(); - -- crm_info("%s: Exiting%s...", caller, fast ? " fast" : mainloop ? " from mainloop" : ""); -+ if (fast < 0) { -+ /* Quit fast on error */ -+ cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm); -+ crm_exit(EINVAL); - -- if (fast == FALSE && mainloop != NULL && g_main_is_running(mainloop)) { -+ } else if ((mainloop != NULL) && g_main_is_running(mainloop)) { -+ /* Quit via returning from the main loop. If fast == 1, we skip the -+ * disconnect here, and it will be done when the main loop returns -+ * (this allows the peer status callback to avoid messing with the -+ * peer caches). -+ */ -+ if (fast == 0) { -+ crm_cluster_disconnect(&crm_cluster); -+ } - g_main_quit(mainloop); - - } else { -- qb_ipcs_destroy(ipcs_ro); -- qb_ipcs_destroy(ipcs_rw); -- qb_ipcs_destroy(ipcs_shm); -- -- if (fast) { -- crm_exit(EINVAL); -- } else { -- crm_exit(pcmk_ok); -- } -+ /* Quit via clean exit. 
Even the peer status callback can disconnect -+ * here, because we're not returning control to the caller. */ -+ crm_cluster_disconnect(&crm_cluster); -+ cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm); -+ crm_exit(pcmk_ok); - } - } -diff --git a/cib/callbacks.h b/cib/callbacks.h -index bca9992..a49428e 100644 ---- a/cib/callbacks.h -+++ b/cib/callbacks.h -@@ -71,7 +71,7 @@ extern void cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op - - void cib_shutdown(int nsig); - void initiate_exit(void); --void terminate_cib(const char *caller, gboolean fast); -+void terminate_cib(const char *caller, int fast); - - extern gboolean cib_legacy_mode(void); - -diff --git a/cib/main.c b/cib/main.c -index e20a2b6..cbaf7b5 100644 ---- a/cib/main.c -+++ b/cib/main.c -@@ -71,8 +71,6 @@ gboolean cib_register_ha(ll_cluster_t * hb_cluster, const char *client_name); - void *hb_conn = NULL; - #endif - --extern void terminate_cib(const char *caller, gboolean fast); -- - GMainLoop *mainloop = NULL; - const char *cib_root = NULL; - char *cib_our_uname = NULL; -@@ -414,7 +412,7 @@ cib_cs_destroy(gpointer user_data) - crm_info("Corosync disconnection complete"); - } else { - crm_err("Corosync connection lost! Exiting."); -- terminate_cib(__FUNCTION__, TRUE); -+ terminate_cib(__FUNCTION__, -1); - } - } - #endif -@@ -422,30 +420,29 @@ cib_cs_destroy(gpointer user_data) - static void - cib_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data) - { -- if ((type == crm_status_processes) && legacy_mode -- && is_not_set(node->processes, crm_get_cluster_proc())) { -- uint32_t old = 0; -- -- if (data) { -- old = *(const uint32_t *)data; -- } -+ switch (type) { -+ case crm_status_processes: -+ if (legacy_mode && is_not_set(node->processes, crm_get_cluster_proc())) { -+ uint32_t old = data? *(const uint32_t *)data : 0; -+ -+ if ((node->processes ^ old) & crm_proc_cpg) { -+ crm_info("Attempting to disable legacy mode after %s left the cluster", -+ node->uname); -+ legacy_mode = FALSE; -+ } -+ } -+ break; - -- if ((node->processes ^ old) & crm_proc_cpg) { -- crm_info("Attempting to disable legacy mode after %s left the cluster", node->uname); -- legacy_mode = FALSE; -- } -- } -+ case crm_status_uname: -+ case crm_status_rstate: -+ case crm_status_nstate: -+ if (cib_shutdown_flag && (crm_active_peers() < 2) -+ && crm_hash_table_size(client_connections) == 0) { - -- if (cib_shutdown_flag && crm_active_peers() < 2 && crm_hash_table_size(client_connections) == 0) { -- crm_info("No more peers"); -- /* @TODO -- * terminate_cib() calls crm_cluster_disconnect() which calls -- * crm_peer_destroy() which destroys the peer caches, which a peer -- * status callback shouldn't do. For now, there is a workaround in -- * crm_update_peer_proc(), but CIB should be refactored to avoid -- * destroying the peer caches here. -- */ -- terminate_cib(__FUNCTION__, FALSE); -+ crm_info("No more peers"); -+ terminate_cib(__FUNCTION__, 1); -+ } -+ break; - } - } - -@@ -455,10 +452,10 @@ cib_ha_connection_destroy(gpointer user_data) - { - if (cib_shutdown_flag) { - crm_info("Heartbeat disconnection complete... exiting"); -- terminate_cib(__FUNCTION__, FALSE); -+ terminate_cib(__FUNCTION__, 0); - } else { - crm_err("Heartbeat connection lost! Exiting."); -- terminate_cib(__FUNCTION__, TRUE); -+ terminate_cib(__FUNCTION__, -1); - } - } - #endif -@@ -541,8 +538,12 @@ cib_init(void) - /* Create the mainloop and run it... 
*/ - mainloop = g_main_new(FALSE); - crm_info("Starting %s mainloop", crm_system_name); -- - g_main_run(mainloop); -+ -+ /* If main loop returned, clean up and exit. We disconnect in case -+ * terminate_cib() was called with fast=1. -+ */ -+ crm_cluster_disconnect(&crm_cluster); - cib_ipc_servers_destroy(ipcs_ro, ipcs_rw, ipcs_shm); - - return crm_exit(pcmk_ok); -diff --git a/cib/messages.c b/cib/messages.c -index 363562c..eca63b9 100644 ---- a/cib/messages.c -+++ b/cib/messages.c -@@ -87,7 +87,7 @@ cib_process_shutdown_req(const char *op, int options, const char *section, xmlNo - - } else if (cib_shutdown_flag) { - crm_info("Shutdown ACK from %s", host); -- terminate_cib(__FUNCTION__, FALSE); -+ terminate_cib(__FUNCTION__, 0); - return pcmk_ok; - - } else { -diff --git a/crmd/crmd_utils.h b/crmd/crmd_utils.h -index 78ccad2..78214bf 100644 ---- a/crmd/crmd_utils.h -+++ b/crmd/crmd_utils.h -@@ -102,11 +102,14 @@ gboolean too_many_st_failures(void); - void st_fail_count_reset(const char * target); - void crmd_peer_down(crm_node_t *peer, bool full); - -+/* Convenience macro for registering a CIB callback -+ * (assumes that data can be freed with free()) -+ */ - # define fsa_register_cib_callback(id, flag, data, fn) do { \ - CRM_ASSERT(fsa_cib_conn); \ -- fsa_cib_conn->cmds->register_callback( \ -+ fsa_cib_conn->cmds->register_callback_full( \ - fsa_cib_conn, id, 10 * (1 + crm_active_peers()), \ -- flag, data, #fn, fn); \ -+ flag, data, #fn, fn, free); \ - } while(0) - - # define start_transition(state) do { \ -diff --git a/crmd/join_client.c b/crmd/join_client.c -index 286cd92..65e3bed 100644 ---- a/crmd/join_client.c -+++ b/crmd/join_client.c -@@ -116,8 +116,8 @@ do_cl_join_offer_respond(long long action, - - /* we only ever want the last one */ - if (query_call_id > 0) { -- /* Calling remove_cib_op_callback() would result in a memory leak of the data field */ - crm_trace("Cancelling previous join query: %d", query_call_id); -+ remove_cib_op_callback(query_call_id, FALSE); - query_call_id = 0; - } - -@@ -173,7 +173,6 @@ join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void * - - done: - free_xml(generation); -- free(join_id); - } - - /* A_CL_JOIN_RESULT */ -diff --git a/crmd/join_dc.c b/crmd/join_dc.c -index f777296..5280b6e 100644 ---- a/crmd/join_dc.c -+++ b/crmd/join_dc.c -@@ -452,8 +452,6 @@ finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, voi - crm_debug("No longer the DC in S_FINALIZE_JOIN: %s/%s", - AM_I_DC ? 
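The crmd_utils.h hunk above makes that ownership rule uniform across crmd: fsa_register_cib_callback() now routes through register_callback_full() with free as the destructor, which is why the surrounding hunks simply delete the trailing free(user_data) lines from each callback, and why join_client.c can now cancel a pending query with remove_cib_op_callback() without leaking. A minimal registration/callback pair under the new convention (example_cb, register_example, and the log message are illustrative; the register_callback_full() signature is the one used throughout this patch):

    #include <string.h>
    #include <crm/crm.h>
    #include <crm/cib.h>

    /* The callback reads user_data but never frees it; the library
     * invokes the destructor supplied at registration time. */
    static void
    example_cb(xmlNode *msg, int call_id, int rc, xmlNode *output,
               void *user_data)
    {
        const char *id = user_data;

        crm_info("Update %d for %s finished: rc=%d", call_id, id, rc);
    }

    static void
    register_example(cib_t *cib, int call_id, const char *id)
    {
        cib->cmds->register_callback_full(cib, call_id, 120, FALSE,
                                          strdup(id), "example_cb",
                                          example_cb, free);
    }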
"DC" : "CRMd", fsa_state2string(fsa_state)); - } -- -- free(user_data); - } - - static void -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index 162ad03..c03fa0b 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -490,7 +490,7 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - if (remote_proxy_new(lrm_state->node_name, session, channel) == NULL) { - remote_proxy_notify_destroy(lrmd, session); - } -- crm_info("new remote proxy client established to %s, session id %s", channel, session); -+ crm_trace("new remote proxy client established to %s, session id %s", channel, session); - } else if (safe_str_eq(op, "destroy")) { - remote_proxy_end_session(session); - -@@ -534,7 +534,16 @@ remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg) - } - - } else if(is_set(flags, crm_ipc_proxied)) { -- int rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL); -+ const char *type = crm_element_value(request, F_TYPE); -+ int rc = 0; -+ -+ if (safe_str_eq(type, T_ATTRD) -+ && crm_element_value(request, F_ATTRD_HOST) == NULL) { -+ crm_xml_add(request, F_ATTRD_HOST, proxy->node_name); -+ crm_xml_add_int(request, F_ATTRD_HOST_ID, get_local_nodeid(0)); -+ } -+ -+ rc = crm_ipc_send(proxy->ipc, request, flags, 5000, NULL); - - if(rc < 0) { - xmlNode *op_reply = create_xml_node(NULL, "nack"); -diff --git a/crmd/membership.c b/crmd/membership.c -index 447e6a8..27ae710 100644 ---- a/crmd/membership.c -+++ b/crmd/membership.c -@@ -200,7 +200,6 @@ remove_conflicting_node_callback(xmlNode * msg, int call_id, int rc, - do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE, - "Deletion of the unknown conflicting node \"%s\": %s (rc=%d)", - node_uuid, pcmk_strerror(rc), rc); -- free(node_uuid); - } - - static void -@@ -215,11 +214,9 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, - crm_notice("Searching conflicting nodes for %s failed: %s (%d)", - new_node_uuid, pcmk_strerror(rc), rc); - } -- free(new_node_uuid); - return; - - } else if (output == NULL) { -- free(new_node_uuid); - return; - } - -@@ -283,8 +280,6 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc, - free_xml(node_state_xml); - } - } -- -- free(new_node_uuid); - } - - static void -diff --git a/crmd/pengine.c b/crmd/pengine.c -index c9544a9..46df648 100644 ---- a/crmd/pengine.c -+++ b/crmd/pengine.c -@@ -77,8 +77,6 @@ save_cib_contents(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us - - free(filename); - } -- -- free(id); - } - - static void -@@ -320,9 +318,10 @@ do_pe_invoke_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void - crm_debug("Discarding PE request in state: %s", fsa_state2string(fsa_state)); - return; - -- } else if (num_cib_op_callbacks() != 0) { -- crm_debug("Re-asking for the CIB: %d peer updates still pending", num_cib_op_callbacks()); -- -+ /* this callback counts as 1 */ -+ } else if (num_cib_op_callbacks() > 1) { -+ crm_debug("Re-asking for the CIB: %d other peer updates still pending", -+ (num_cib_op_callbacks() - 1)); - sleep(1); - register_fsa_action(A_PE_INVOKE); - return; -diff --git a/crmd/te_callbacks.c b/crmd/te_callbacks.c -index 68742c2..c22b273 100644 ---- a/crmd/te_callbacks.c -+++ b/crmd/te_callbacks.c -@@ -294,6 +294,49 @@ static char *get_node_from_xpath(const char *xpath) - return nodeid; - } - -+static char *extract_node_uuid(const char *xpath) -+{ -+ char *mutable_path = strdup(xpath); -+ char *node_uuid = NULL; -+ char *search = NULL; -+ char *match = NULL; -+ -+ match = strstr(mutable_path, "node_state[@id=\'") 
+ strlen("node_state[@id=\'"); -+ search = strchr(match, '\''); -+ search[0] = 0; -+ -+ node_uuid = strdup(match); -+ free(mutable_path); -+ return node_uuid; -+} -+ -+static void abort_unless_down(const char *xpath, const char *op, xmlNode *change, const char *reason) -+{ -+ char *node_uuid = NULL; -+ crm_action_t *down = NULL; -+ -+ if(safe_str_neq(op, "delete")) { -+ abort_transition(INFINITY, tg_restart, reason, change); -+ return; -+ } -+ -+ node_uuid = extract_node_uuid(xpath); -+ if(node_uuid == NULL) { -+ crm_err("Could not extract node ID from %s", xpath); -+ abort_transition(INFINITY, tg_restart, reason, change); -+ return; -+ } -+ -+ down = match_down_event(0, node_uuid, NULL, FALSE); -+ if(down == NULL || down->executed == false) { -+ crm_trace("Not expecting %s to be down (%s)", node_uuid, xpath); -+ abort_transition(INFINITY, tg_restart, reason, change); -+ } else { -+ crm_trace("Expecting changes to %s (%s)", node_uuid, xpath); -+ } -+ free(node_uuid); -+} -+ - void - te_update_diff(const char *event, xmlNode * msg) - { -@@ -388,27 +431,22 @@ te_update_diff(const char *event, xmlNode * msg) - break; /* Wont be packaged with any resource operations we may be waiting for */ - - } else if(strstr(xpath, "/"XML_TAG_TRANSIENT_NODEATTRS"[") || safe_str_eq(name, XML_TAG_TRANSIENT_NODEATTRS)) { -- abort_transition(INFINITY, tg_restart, "Transient attribute change", change); -+ abort_unless_down(xpath, op, change, "Transient attribute change"); - break; /* Wont be packaged with any resource operations we may be waiting for */ - - } else if(strstr(xpath, "/"XML_LRM_TAG_RSC_OP"[") && safe_str_eq(op, "delete")) { - crm_action_t *cancel = NULL; - char *mutable_key = strdup(xpath); -- char *mutable_node = strdup(xpath); - char *search = NULL; - - const char *key = NULL; -- const char *node_uuid = NULL; -+ char *node_uuid = extract_node_uuid(xpath); - - search = strrchr(mutable_key, '\''); - search[0] = 0; - - key = strrchr(mutable_key, '\'') + 1; - -- node_uuid = strstr(mutable_node, "node_state[@id=\'") + strlen("node_state[@id=\'"); -- search = strchr(node_uuid, '\''); -- search[0] = 0; -- - cancel = get_cancel_action(key, node_uuid); - if (cancel == NULL) { - abort_transition(INFINITY, tg_restart, "Resource operation removal", change); -@@ -422,14 +460,14 @@ te_update_diff(const char *event, xmlNode * msg) - trigger_graph(); - - } -- free(mutable_node); - free(mutable_key); -+ free(node_uuid); - - } else if(strstr(xpath, "/"XML_CIB_TAG_LRM"[") && safe_str_eq(op, "delete")) { -- abort_transition(INFINITY, tg_restart, "Resource state removal", change); -+ abort_unless_down(xpath, op, change, "Resource state removal"); - - } else if(strstr(xpath, "/"XML_CIB_TAG_STATE"[") && safe_str_eq(op, "delete")) { -- abort_transition(INFINITY, tg_restart, "Node state removal", change); -+ abort_unless_down(xpath, op, change, "Node state removal"); - - } else if(name == NULL) { - crm_debug("No result for %s operation to %s", op, xpath); -@@ -717,7 +755,6 @@ cib_fencing_updated(xmlNode * msg, int call_id, int rc, xmlNode * output, void * - } else { - crm_info("Fencing update %d for %s: complete", call_id, (char *)user_data); - } -- free(user_data); - } - - void -diff --git a/crmd/utils.c b/crmd/utils.c -index 5ca4b9d..4fe3a49 100644 ---- a/crmd/utils.c -+++ b/crmd/utils.c -@@ -999,7 +999,6 @@ erase_xpath_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void - - do_crm_log_unlikely(rc == 0 ? 
LOG_DEBUG : LOG_NOTICE, - "Deletion of \"%s\": %s (rc=%d)", xpath, pcmk_strerror(rc), rc); -- free(xpath); - } - - void -diff --git a/cts/CIB.py b/cts/CIB.py -index 82d02d7..8fbba6c 100644 ---- a/cts/CIB.py -+++ b/cts/CIB.py -@@ -105,7 +105,7 @@ class CIB11(ConfigBase): - if not name: - name = "r%s%d" % (self.CM.Env["IPagent"], self.counter) - self.counter = self.counter + 1 -- r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) -+ r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard) - - r.add_op("monitor", "5s") - return r -@@ -387,7 +387,7 @@ class ConfigFactory: - """register a constructor""" - _args = [constructor] - _args.extend(args) -- setattr(self, methodName, apply(ConfigFactoryItem,_args, kargs)) -+ setattr(self, methodName, ConfigFactoryItem(*_args, **kargs)) - - def unregister(self, methodName): - """unregister a constructor""" -@@ -415,7 +415,6 @@ class ConfigFactory: - - class ConfigFactoryItem: - def __init__(self, function, *args, **kargs): -- assert callable(function), "function should be a callable obj" - self._function = function - self._args = args - self._kargs = kargs -@@ -426,7 +425,7 @@ class ConfigFactoryItem: - _args.extend(args) - _kargs = self._kargs.copy() - _kargs.update(kargs) -- return apply(self._function,_args,_kargs) -+ return self._function(*_args,**_kargs) - - # Basic Sanity Testing - if __name__ == '__main__': -@@ -449,4 +448,4 @@ if __name__ == '__main__': - - CibFactory = ConfigFactory(manager) - cib = CibFactory.createConfig("pacemaker-1.1") -- print cib.contents() -+ print(cib.contents()) -diff --git a/cts/CM_ais.py b/cts/CM_ais.py -index a34f9b1..d2e2c1f 100644 ---- a/cts/CM_ais.py -+++ b/cts/CM_ais.py -@@ -80,7 +80,7 @@ class crm_ais(crm_lha): - # Processes running under valgrind can't be shot with "killall -9 processname", - # so don't include them in the returned list - vgrind = self.Env["valgrind-procs"].split() -- for key in self.fullcomplist.keys(): -+ for key in list(self.fullcomplist.keys()): - if self.Env["valgrind-tests"]: - if key in vgrind: - self.log("Filtering %s from the component list as it is being profiled by valgrind" % key) -diff --git a/cts/CM_lha.py b/cts/CM_lha.py -index b192272..28742d9 100755 ---- a/cts/CM_lha.py -+++ b/cts/CM_lha.py -@@ -92,7 +92,7 @@ class crm_lha(ClusterManager): - self.log("Node %s is not up." % node) - return None - -- if not self.CIBsync.has_key(node) and self.Env["ClobberCIB"] == 1: -+ if not node in self.CIBsync and self.Env["ClobberCIB"] == 1: - self.CIBsync[node] = 1 - self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*") - -diff --git a/cts/CTS.py b/cts/CTS.py -index 9f9a291..634348a 100644 ---- a/cts/CTS.py -+++ b/cts/CTS.py -@@ -69,7 +69,7 @@ function status() { - function start() { - # Is it already running? - if -- status -+ status - then - return - fi -@@ -94,20 +94,20 @@ case $action in - nohup $0 $f start >/dev/null 2>&1 > $f -- echo " $*" >> $f -+ uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f -+ echo " $*" >> $f - start -- ;; -+ ;; - *) -- echo "Unknown action: $action." -- ;; -+ echo "Unknown action: $action." 
-+ ;; - esac - """ - -@@ -157,7 +157,7 @@ class CtsLab: - self.Env.dump() - - def has_key(self, key): -- return self.Env.has_key(key) -+ return key in self.Env.keys() - - def __getitem__(self, key): - return self.Env[key] -@@ -275,7 +275,7 @@ class ClusterManager(UserDict): - None - - def _finalConditions(self): -- for key in self.keys(): -+ for key in list(self.keys()): - if self[key] == None: - raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.") - -@@ -299,14 +299,14 @@ class ClusterManager(UserDict): - if key == "Name": - return self.name - -- print "FIXME: Getting %s from %s" % (key, repr(self)) -- if self.data.has_key(key): -+ print("FIXME: Getting %s from %s" % (key, repr(self))) -+ if key in self.data: - return self.data[key] - - return self.templates.get_patterns(self.Env["Name"], key) - - def __setitem__(self, key, value): -- print "FIXME: Setting %s=%s on %s" % (key, value, repr(self)) -+ print("FIXME: Setting %s=%s on %s" % (key, value, repr(self))) - self.data[key] = value - - def key_for_node(self, node): -@@ -333,7 +333,7 @@ class ClusterManager(UserDict): - def prepare(self): - '''Finish the Initialization process. Prepare to test...''' - -- print repr(self)+"prepare" -+ print(repr(self)+"prepare") - for node in self.Env["nodes"]: - if self.StataCM(node): - self.ShouldBeStatus[node] = "up" -@@ -387,11 +387,11 @@ class ClusterManager(UserDict): - return None - - if not self.templates["Pat:Fencing_start"]: -- print "No start pattern" -+ print("No start pattern") - return None - - if not self.templates["Pat:Fencing_ok"]: -- print "No ok pattern" -+ print("No ok pattern") - return None - - stonith = None -@@ -500,7 +500,7 @@ class ClusterManager(UserDict): - else: self.debug("Starting %s on node %s" % (self.templates["Name"], node)) - ret = 1 - -- if not self.ShouldBeStatus.has_key(node): -+ if not node in self.ShouldBeStatus: - self.ShouldBeStatus[node] = "down" - - if self.ShouldBeStatus[node] != "down": -@@ -871,13 +871,13 @@ class ClusterManager(UserDict): - - for host in self.Env["nodes"]: - log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR -- if has_log_stats.has_key(host): -+ if host in has_log_stats: - self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file)) - (rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2) - self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file)) - - fname = "cts-stats-%d-nodes-%s.csv" % (len(self.Env["nodes"]), host) -- print "Extracted stats: %s" % fname -+ print("Extracted stats: %s" % fname) - fd = open(fname, "a") - fd.writelines(lines) - fd.close() -@@ -891,7 +891,7 @@ class ClusterManager(UserDict): - - for host in self.Env["nodes"]: - log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR -- if not has_log_stats.has_key(host): -+ if not host in has_log_stats: - - global log_stats - global log_stats_bin -@@ -986,7 +986,7 @@ class Process(Component): - self.CM = cm - self.badnews_ignore = badnews_ignore - self.badnews_ignore.extend(common_ignore) -- self.triggersreboot = triggersreboot -+ self.triggersreboot = triggersreboot - - if process: - self.proc = str(process) -diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py -index 8d52062..e8663f2 100755 ---- a/cts/CTSaudits.py -+++ b/cts/CTSaudits.py -@@ -108,7 +108,7 @@ class LogAudit(ClusterAudit): - self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node)) - - for k in self.kinds: -- if watch.has_key(k): -+ if k in watch: - w = watch[k] - if watch_pref == "any": 
self.CM.log("Testing for %s logs" % (k)) - w.lookforall(silent=True) -@@ -118,7 +118,7 @@ class LogAudit(ClusterAudit): - self.CM.Env["LogWatcher"] = w.kind - return 1 - -- for k in watch.keys(): -+ for k in list(watch.keys()): - w = watch[k] - if w.unmatched: - for regex in w.unmatched: -@@ -226,7 +226,7 @@ class FileAudit(ClusterAudit): - self.known.append(line) - self.CM.log("Warning: Corosync core file on %s: %s" % (node, line)) - -- if self.CM.ShouldBeStatus.has_key(node) and self.CM.ShouldBeStatus[node] == "down": -+ if node in self.CM.ShouldBeStatus and self.CM.ShouldBeStatus[node] == "down": - clean = 0 - (rc, lsout) = self.CM.rsh(node, "ls -al /dev/shm | grep qb-", None) - for line in lsout: -@@ -532,7 +532,7 @@ class CrmdStateAudit(ClusterAudit): - , "auditfail":0} - - def has_key(self, key): -- return self.Stats.has_key(key) -+ return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value -@@ -542,7 +542,7 @@ class CrmdStateAudit(ClusterAudit): - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' -- if not self.Stats.has_key(name): -+ if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - -@@ -601,7 +601,7 @@ class CIBAudit(ClusterAudit): - , "auditfail":0} - - def has_key(self, key): -- return self.Stats.has_key(key) -+ return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value -@@ -611,7 +611,7 @@ class CIBAudit(ClusterAudit): - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' -- if not self.Stats.has_key(name): -+ if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - -@@ -726,7 +726,7 @@ class PartitionAudit(ClusterAudit): - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' -- if not self.Stats.has_key(name): -+ if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - -diff --git a/cts/CTSscenarios.py b/cts/CTSscenarios.py -index 2f3a69b..cc6e67e 100644 ---- a/cts/CTSscenarios.py -+++ b/cts/CTSscenarios.py -@@ -124,7 +124,7 @@ A partially set up scenario is torn down if it fails during setup. - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' -- if not self.Stats.has_key(name): -+ if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - -@@ -176,7 +176,7 @@ A partially set up scenario is torn down if it fails during setup. - - elapsed_time = stoptime - starttime - test_time = stoptime - test.get_timer() -- if not test.has_key("min_time"): -+ if not test["min_time"]: - test["elapsed_time"] = elapsed_time - test["min_time"] = test_time - test["max_time"] = test_time -@@ -211,7 +211,7 @@ A partially set up scenario is torn down if it fails during setup. 
- } - self.ClusterManager.log("Test Summary") - for test in self.Tests: -- for key in stat_filter.keys(): -+ for key in list(stat_filter.keys()): - stat_filter[key] = test.Stats[key] - self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter)) - -@@ -387,7 +387,7 @@ According to the manual page for ping: - '''Start the PingFest!''' - - self.PingSize = 1024 -- if CM.Env.has_key("PingSize"): -+ if "PingSize" in CM.Env.keys(): - self.PingSize = CM.Env["PingSize"] - - CM.log("Starting %d byte flood pings" % self.PingSize) -@@ -550,7 +550,7 @@ Test a rolling upgrade between two versions of the stack - return self.install(node, self.CM.Env["previous-version"]) - - def SetUp(self, CM): -- print repr(self)+"prepare" -+ print(repr(self)+"prepare") - CM.prepare() - - # Clear out the cobwebs -diff --git a/cts/CTStests.py b/cts/CTStests.py -index f817004..00fcd13 100644 ---- a/cts/CTStests.py -+++ b/cts/CTStests.py -@@ -97,13 +97,18 @@ class CTSTest: - self.logger.debug(args) - - def has_key(self, key): -- return self.Stats.has_key(key) -+ return key in self.Stats - - def __setitem__(self, key, value): - self.Stats[key] = value - - def __getitem__(self, key): -- return self.Stats[key] -+ if str(key) == "0": -+ raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead") -+ -+ if key in self.Stats: -+ return self.Stats[key] -+ return None - - def log_mark(self, msg): - self.debug("MARK: test %s %s %d" % (self.name,msg,time.time())) -@@ -128,7 +133,7 @@ class CTSTest: - - def incr(self, name): - '''Increment (or initialize) the value associated with the given name''' -- if not self.Stats.has_key(name): -+ if not name in self.Stats: - self.Stats[name] = 0 - self.Stats[name] = self.Stats[name]+1 - -@@ -534,7 +539,7 @@ class StonithdTest(CTSTest): - if not self.is_applicable_common(): - return 0 - -- if self.Env.has_key("DoFencing"): -+ if "DoFencing" in self.Env.keys(): - return self.Env["DoFencing"] - - return 1 -@@ -1048,7 +1053,7 @@ class BandwidthTest(CTSTest): - T1 = linesplit[0] - timesplit = string.split(T1,":") - time2split = string.split(timesplit[2],".") -- time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001 -+ time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 - break - - while count < 100: -@@ -1070,7 +1075,7 @@ class BandwidthTest(CTSTest): - T2 = linessplit[0] - timesplit = string.split(T2,":") - time2split = string.split(timesplit[2],".") -- time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001 -+ time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001 - time = time2-time1 - if (time <= 0): - return 0 -@@ -1105,7 +1110,7 @@ class MaintenanceMode(CTSTest): - # fail the resource right after turning Maintenance mode on - # verify it is not recovered until maintenance mode is turned off - if action == "On": -- pats.append("pengine.*: warning:.* Processing failed op %s for %s on" % (self.action, self.rid)) -+ pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for %s on" % (self.action, self.rid)) - else: - pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "stop_0")) - pats.append(self.templates["Pat:RscOpOK"] % (self.rid, "start_0")) -@@ -1314,7 +1319,7 @@ class ResourceRecover(CTSTest): - self.debug("Shooting %s aka. 
%s" % (rsc.clone_id, rsc.id)) - - pats = [] -- pats.append(r"pengine.*: warning:.* Processing failed op %s for (%s|%s) on" % (self.action, -+ pats.append(r"pengine.*:\s+warning:.*Processing failed op %s for (%s|%s) on" % (self.action, - rsc.id, rsc.clone_id)) - - if rsc.managed(): -@@ -1574,7 +1579,7 @@ class SplitBrainTest(CTSTest): - p_max = len(self.Env["nodes"]) - for node in self.Env["nodes"]: - p = self.Env.RandomGen.randint(1, p_max) -- if not partitions.has_key(p): -+ if not p in partitions: - partitions[p] = [] - partitions[p].append(node) - p_max = len(partitions.keys()) -@@ -1583,13 +1588,13 @@ class SplitBrainTest(CTSTest): - # else, try again - - self.debug("Created %d partitions" % p_max) -- for key in partitions.keys(): -+ for key in list(partitions.keys()): - self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key])) - - # Disabling STONITH to reduce test complexity for now - self.rsh(node, "crm_attribute -V -n stonith-enabled -v false") - -- for key in partitions.keys(): -+ for key in list(partitions.keys()): - self.isolate_partition(partitions[key]) - - count = 30 -@@ -1612,7 +1617,7 @@ class SplitBrainTest(CTSTest): - self.CM.partitions_expected = 1 - - # And heal them again -- for key in partitions.keys(): -+ for key in list(partitions.keys()): - self.heal_partition(partitions[key]) - - # Wait for a single partition to form -@@ -2247,11 +2252,11 @@ class RollingUpgradeTest(CTSTest): - if not self.is_applicable_common(): - return None - -- if not self.Env.has_key("rpm-dir"): -+ if not "rpm-dir" in self.Env.keys(): - return None -- if not self.Env.has_key("current-version"): -+ if not "current-version" in self.Env.keys(): - return None -- if not self.Env.has_key("previous-version"): -+ if not "previous-version" in self.Env.keys(): - return None - - return 1 -@@ -2305,7 +2310,7 @@ class BSC_AddResource(CTSTest): - if ":" in ip: - fields = ip.rpartition(":") - fields[2] = str(hex(int(fields[2], 16)+1)) -- print str(hex(int(f[2], 16)+1)) -+ print(str(hex(int(f[2], 16)+1))) - else: - fields = ip.rpartition('.') - fields[2] = str(int(fields[2])+1) -@@ -3109,7 +3114,7 @@ class RemoteStonithd(CTSTest): - if not self.driver.is_applicable(): - return False - -- if self.Env.has_key("DoFencing"): -+ if "DoFencing" in self.Env.keys(): - return self.Env["DoFencing"] - - return True -diff --git a/cts/OCFIPraTest.py b/cts/OCFIPraTest.py -index 9900a62..03d964b 100755 ---- a/cts/OCFIPraTest.py -+++ b/cts/OCFIPraTest.py -@@ -28,13 +28,13 @@ from cts.CTSvars import * - - - def usage(): -- print "usage: " + sys.argv[0] \ -+ print("usage: " + sys.argv[0] \ - + " [-2]"\ - + " [--ipbase|-i first-test-ip]"\ - + " [--ipnum|-n test-ip-num]"\ - + " [--help|-h]"\ - + " [--perform|-p op]"\ -- + " [number-of-iterations]" -+ + " [number-of-iterations]") - sys.exit(1) - - -@@ -71,7 +71,7 @@ def log(towrite): - t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time())) - logstr = t + " "+str(towrite) - syslog.syslog(logstr) -- print logstr -+ print(logstr) - - if __name__ == '__main__': - ra = "IPaddr" -diff --git a/cts/cib_xml.py b/cts/cib_xml.py -index 0bd963b..3d8f8d4 100644 ---- a/cts/cib_xml.py -+++ b/cts/cib_xml.py -@@ -19,7 +19,7 @@ class XmlBase(CibBase): - text = '''<%s''' % self.tag - if self.name: - text += ''' id="%s"''' % (self.name) -- for k in self.kwargs.keys(): -+ for k in list(self.kwargs.keys()): - text += ''' %s="%s"''' % (k, self.kwargs[k]) - - if not self.children: -@@ -149,22 +149,22 @@ class Resource(XmlBase): - def constraints(self): - text = "" - -- for k in 
self.scores.keys(): -+ for k in list(self.scores.keys()): - text += '''''' % (k, self.name) - text += self.scores[k].show() - text += '''''' - -- for k in self.needs.keys(): -+ for k in list(self.needs.keys()): - text += '''''' - -- for k in self.coloc.keys(): -+ for k in list(self.coloc.keys()): - text += '''''' - -@@ -179,13 +179,13 @@ class Resource(XmlBase): - - if len(self.meta) > 0: - text += '''''' % self.name -- for p in self.meta.keys(): -+ for p in list(self.meta.keys()): - text += '''''' % (self.name, p, p, self.meta[p]) - text += '''''' - - if len(self.param) > 0: - text += '''''' % self.name -- for p in self.param.keys(): -+ for p in list(self.param.keys()): - text += '''''' % (self.name, p, p, self.param[p]) - text += '''''' - -@@ -219,7 +219,7 @@ class Group(Resource): - - if len(self.meta) > 0: - text += '''''' % self.name -- for p in self.meta.keys(): -+ for p in list(self.meta.keys()): - text += '''''' % (self.name, p, p, self.meta[p]) - text += '''''' - -diff --git a/cts/environment.py b/cts/environment.py -index 61d4211..4ed5ced 100644 ---- a/cts/environment.py -+++ b/cts/environment.py -@@ -92,7 +92,7 @@ class Environment: - - def dump(self): - keys = [] -- for key in self.data.keys(): -+ for key in list(self.data.keys()): - keys.append(key) - - keys.sort() -@@ -106,16 +106,19 @@ class Environment: - if key == "nodes": - return True - -- return self.data.has_key(key) -+ return key in self.data - - def __getitem__(self, key): -+ if str(key) == "0": -+ raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead") -+ - if key == "nodes": - return self.Nodes - - elif key == "Name": - return self.get_stack_short() - -- elif self.data.has_key(key): -+ elif key in self.data: - return self.data[key] - - else: -@@ -175,12 +178,12 @@ class Environment: - self.data["Stack"] = "corosync (plugin v0)" - - else: -- print "Unknown stack: "+name -+ raise ValueError("Unknown stack: "+name) - sys.exit(1) - - def get_stack_short(self): - # Create the Cluster Manager object -- if not self.data.has_key("Stack"): -+ if not "Stack" in self.data: - return "unknown" - - elif self.data["Stack"] == "heartbeat": -@@ -202,12 +205,12 @@ class Environment: - return "crm-plugin-v0" - - else: -- LogFactory().log("Unknown stack: "+self.data["stack"]) -- sys.exit(1) -+ LogFactory().log("Unknown stack: "+self["stack"]) -+ raise ValueError("Unknown stack: "+self["stack"]) - - def detect_syslog(self): - # Detect syslog variant -- if not self.has_key("syslogd"): -+ if not "syslogd" in self.data: - if self["have_systemd"]: - # Systemd - self["syslogd"] = self.rsh(self.target, "systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", stdout=1).strip() -@@ -215,13 +218,13 @@ class Environment: - # SYS-V - self["syslogd"] = self.rsh(self.target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", stdout=1).strip() - -- if not self.has_key("syslogd") or not self["syslogd"]: -+ if not "syslogd" in self.data or not self["syslogd"]: - # default - self["syslogd"] = "rsyslog" - - def detect_at_boot(self): - # Detect if the cluster starts at boot -- if not self.has_key("at-boot"): -+ if not "at-boot" in self.data: - atboot = 0 - - if self["have_systemd"]: -@@ -237,7 +240,7 @@ class Environment: - - def detect_ip_offset(self): - # Try to determin an offset for IPaddr resources -- if self["CIBResource"] and not self.has_key("IPBase"): -+ if self["CIBResource"] and not "IPBase" in self.data: - network=self.rsh(self.target, "ip addr | grep inet | grep 
-v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", stdout=1).strip() - self["IPBase"] = self.rsh(self.target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, stdout=1).strip() - if not self["IPBase"]: -@@ -261,7 +264,7 @@ class Environment: - - def validate(self): - if len(self["nodes"]) < 1: -- print "No nodes specified!" -+ print("No nodes specified!") - sys.exit(1) - - def discover(self): -@@ -276,7 +279,7 @@ class Environment: - break; - self["cts-master"] = master - -- if not self.has_key("have_systemd"): -+ if not "have_systemd" in self.data: - self["have_systemd"] = not self.rsh(self.target, "systemctl list-units") - - self.detect_syslog() -@@ -390,7 +393,7 @@ class Environment: - self["DoStonith"]=1 - self["stonith-type"] = "fence_openstack" - -- print "Obtaining OpenStack credentials from the current environment" -+ print("Obtaining OpenStack credentials from the current environment") - self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % ( - os.environ['OS_REGION_NAME'], - os.environ['OS_TENANT_NAME'], -@@ -403,7 +406,7 @@ class Environment: - self["DoStonith"]=1 - self["stonith-type"] = "fence_rhevm" - -- print "Obtaining RHEV-M credentials from the current environment" -+ print("Obtaining RHEV-M credentials from the current environment") - self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % ( - os.environ['RHEVM_USERNAME'], - os.environ['RHEVM_PASSWORD'], -@@ -442,7 +445,7 @@ class Environment: - try: - float(args[i+1]) - except ValueError: -- print ("--xmit-loss parameter should be float") -+ print("--xmit-loss parameter should be float") - self.usage(args[i+1]) - skipthis=1 - self["XmitLoss"] = args[i+1] -@@ -451,7 +454,7 @@ class Environment: - try: - float(args[i+1]) - except ValueError: -- print ("--recv-loss parameter should be float") -+ print("--recv-loss parameter should be float") - self.usage(args[i+1]) - skipthis=1 - self["RecvLoss"] = args[i+1] -@@ -503,7 +506,7 @@ class Environment: - self["DoStonith"]=1 - self["stonith-type"] = "fence_rhevm" - -- print "Obtaining RHEV-M credentials from the current environment" -+ print("Obtaining RHEV-M credentials from the current environment") - self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % ( - os.environ['RHEVM_USERNAME'], - os.environ['RHEVM_PASSWORD'], -@@ -605,7 +608,7 @@ class Environment: - skipthis=1 - (name, value) = args[i+1].split('=') - self[name] = value -- print "Setting %s = %s" % (name, value) -+ print("Setting %s = %s" % (name, value)) - - elif args[i] == "--help": - self.usage(args[i], 0) -@@ -622,52 +625,52 @@ class Environment: - - def usage(self, arg, status=1): - if status: -- print "Illegal argument %s" % arg -- print "usage: " + sys.argv[0] +" [options] number-of-iterations" -- print "\nCommon options: " -- print "\t [--nodes 'node list'] list of cluster nodes separated by whitespace" -- print "\t [--group | -g 'name'] use the nodes listed in the named DSH group (~/.dsh/groups/$name)" -- print "\t [--limit-nodes max] only use the first 'max' cluster nodes supplied with --nodes" -- print "\t [--stack (v0|v1|cman|corosync|heartbeat|openais)] which cluster stack is installed" -- print "\t [--list-tests] list the valid tests" -- print "\t [--benchmark] add the timing information" -- print "\t " -- print "Options that CTS will usually auto-detect correctly: " -- print "\t [--logfile path] where should the test software look 
for logs from cluster nodes" -- print "\t [--syslog-facility name] which syslog facility should the test software log to" -- print "\t [--at-boot (1|0)] does the cluster software start at boot time" -- print "\t [--test-ip-base ip] offset for generated IP address resources" -- print "\t " -- print "Options for release testing: " -- print "\t [--populate-resources | -r] generate a sample configuration" -- print "\t [--choose name] run only the named test" -- print "\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]" -- print "\t [--once] run all valid tests once" -- print "\t " -- print "Additional (less common) options: " -- print "\t [--clobber-cib | -c ] erase any existing configuration" -- print "\t [--outputfile path] optional location for the test software to write logs to" -- print "\t [--trunc] truncate logfile before starting" -- print "\t [--xmit-loss lost-rate(0.0-1.0)]" -- print "\t [--recv-loss lost-rate(0.0-1.0)]" -- print "\t [--standby (1 | 0 | yes | no)]" -- print "\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]" -- print "\t [--stonith-type type]" -- print "\t [--stonith-args name=value]" -- print "\t [--bsc]" -- print "\t [--no-loop-tests] dont run looping/time-based tests" -- print "\t [--no-unsafe-tests] dont run tests that are unsafe for use with ocfs2/drbd" -- print "\t [--valgrind-tests] include tests using valgrind" -- print "\t [--experimental-tests] include experimental tests" -- print "\t [--container-tests] include pacemaker_remote tests that run in lxc container resources" -- print "\t [--oprofile 'node list'] list of cluster nodes to run oprofile on]" -- print "\t [--qarsh] use the QARSH backdoor to access nodes instead of SSH" -- print "\t [--docker] Indicates nodes are docker nodes." -- print "\t [--seed random_seed]" -- print "\t [--set option=value]" -- print "\t " -- print "\t Example: " -- print "\t python sys.argv[0] -g virt1 --stack cs -r --stonith ssh --schema pacemaker-1.0 500" -+ print("Illegal argument %s" % arg) -+ print("usage: " + sys.argv[0] +" [options] number-of-iterations") -+ print("\nCommon options: ") -+ print("\t [--nodes 'node list'] list of cluster nodes separated by whitespace") -+ print("\t [--group | -g 'name'] use the nodes listed in the named DSH group (~/.dsh/groups/$name)") -+ print("\t [--limit-nodes max] only use the first 'max' cluster nodes supplied with --nodes") -+ print("\t [--stack (v0|v1|cman|corosync|heartbeat|openais)] which cluster stack is installed") -+ print("\t [--list-tests] list the valid tests") -+ print("\t [--benchmark] add the timing information") -+ print("\t ") -+ print("Options that CTS will usually auto-detect correctly: ") -+ print("\t [--logfile path] where should the test software look for logs from cluster nodes") -+ print("\t [--syslog-facility name] which syslog facility should the test software log to") -+ print("\t [--at-boot (1|0)] does the cluster software start at boot time") -+ print("\t [--test-ip-base ip] offset for generated IP address resources") -+ print("\t ") -+ print("Options for release testing: ") -+ print("\t [--populate-resources | -r] generate a sample configuration") -+ print("\t [--choose name] run only the named test") -+ print("\t [--stonith (1 | 0 | yes | no | rhcs | ssh)]") -+ print("\t [--once] run all valid tests once") -+ print("\t ") -+ print("Additional (less common) options: ") -+ print("\t [--clobber-cib | -c ] erase any existing configuration") -+ print("\t [--outputfile path] optional location for the test software to write logs to") -+ print("\t [--trunc] 
truncate logfile before starting") -+ print("\t [--xmit-loss lost-rate(0.0-1.0)]") -+ print("\t [--recv-loss lost-rate(0.0-1.0)]") -+ print("\t [--standby (1 | 0 | yes | no)]") -+ print("\t [--fencing (1 | 0 | yes | no | rhcs | lha | openstack )]") -+ print("\t [--stonith-type type]") -+ print("\t [--stonith-args name=value]") -+ print("\t [--bsc]") -+ print("\t [--no-loop-tests] dont run looping/time-based tests") -+ print("\t [--no-unsafe-tests] dont run tests that are unsafe for use with ocfs2/drbd") -+ print("\t [--valgrind-tests] include tests using valgrind") -+ print("\t [--experimental-tests] include experimental tests") -+ print("\t [--container-tests] include pacemaker_remote tests that run in lxc container resources") -+ print("\t [--oprofile 'node list'] list of cluster nodes to run oprofile on]") -+ print("\t [--qarsh] use the QARSH backdoor to access nodes instead of SSH") -+ print("\t [--docker] Indicates nodes are docker nodes.") -+ print("\t [--seed random_seed]") -+ print("\t [--set option=value]") -+ print("\t ") -+ print("\t Example: ") -+ print("\t python sys.argv[0] -g virt1 --stack cs -r --stonith ssh --schema pacemaker-1.0 500") - - sys.exit(status) - -diff --git a/cts/logging.py b/cts/logging.py -index 8afa611..08da44a 100644 ---- a/cts/logging.py -+++ b/cts/logging.py -@@ -22,7 +22,7 @@ Licensed under the GNU GPL. - # along with this program; if not, write to the Free Software - # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - --import types, string, sys, time, os -+import string, sys, time, os - - class Logger: - TimeFormat = "%b %d %H:%M:%S\t" -@@ -47,7 +47,7 @@ class StdErrLog(Logger): - - def __call__(self, lines): - t = time.strftime(Logger.TimeFormat, time.localtime(time.time())) -- if isinstance(lines, types.StringType): -+ if isinstance(lines, basestring): - sys.__stderr__.writelines([t, lines, "\n"]) - else: - for line in lines: -@@ -71,7 +71,7 @@ class FileLog(Logger): - fd = open(self.logfile, "a") - t = time.strftime(Logger.TimeFormat, time.localtime(time.time())) - -- if isinstance(lines, types.StringType): -+ if isinstance(lines, basestring): - fd.writelines([t, self.hostname, self.source, lines, "\n"]) - else: - for line in lines: -diff --git a/cts/patterns.py b/cts/patterns.py -index 493b690..3cdce2f 100644 ---- a/cts/patterns.py -+++ b/cts/patterns.py -@@ -67,9 +67,9 @@ class BasePatterns: - } - - def get_component(self, key): -- if self.components.has_key(key): -+ if key in self.components: - return self.components[key] -- print "Unknown component '%s' for %s" % (key, self.name) -+ print("Unknown component '%s' for %s" % (key, self.name)) - return [] - - def get_patterns(self, key): -@@ -87,12 +87,12 @@ class BasePatterns: - def __getitem__(self, key): - if key == "Name": - return self.name -- elif self.commands.has_key(key): -+ elif key in self.commands: - return self.commands[key] -- elif self.search.has_key(key): -+ elif key in self.search: - return self.search[key] - else: -- print "Unknown template '%s' for %s" % (key, self.name) -+ print("Unknown template '%s' for %s" % (key, self.name)) - return None - - class crm_lha(BasePatterns): -@@ -489,9 +489,9 @@ class PatternSelector: - crm_mcp_docker(name) - - def get_variant(self, variant): -- if patternvariants.has_key(variant): -+ if variant in patternvariants: - return patternvariants[variant] -- print "defaulting to crm-base for %s" % variant -+ print("defaulting to crm-base for %s" % variant) - return self.base - - def get_patterns(self, variant, kind): -@@ 
-532,7 +532,7 @@ if __name__ == '__main__': - template = args[i+1] - - else: -- print "Illegal argument " + args[i] -+ print("Illegal argument " + args[i]) - - -- print PatternSelector(kind)[template] -+ print(PatternSelector(kind)[template]) -diff --git a/cts/remote.py b/cts/remote.py -index b32b028..040b48a 100644 ---- a/cts/remote.py -+++ b/cts/remote.py -@@ -147,7 +147,7 @@ class RemoteExec: - sysname = args[0] - command = args[1] - -- #print "sysname: %s, us: %s" % (sysname, self.OurNode) -+ #print("sysname: %s, us: %s" % (sysname, self.OurNode)) - if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost": - ret = command - else: -@@ -164,7 +164,7 @@ class RemoteExec: - self.logger.debug(args) - - def call_async(self, node, command, completionDelegate=None): -- #if completionDelegate: print "Waiting for %d on %s: %s" % (proc.pid, node, command) -+ #if completionDelegate: print("Waiting for %d on %s: %s" % (proc.pid, node, command)) - aproc = AsyncRemoteCmd(node, self._cmd([node, command]), completionDelegate=completionDelegate) - aproc.start() - return aproc -@@ -186,7 +186,7 @@ class RemoteExec: - proc = Popen(self._cmd([node, command]), - stdout = PIPE, stderr = PIPE, close_fds = True, shell = True) - -- #if completionDelegate: print "Waiting for %d on %s: %s" % (proc.pid, node, command) -+ #if completionDelegate: print("Waiting for %d on %s: %s" % (proc.pid, node, command)) - if not synchronous and proc.pid > 0 and not self.silent: - aproc = AsyncWaitProc(proc, node, command, completionDelegate=completionDelegate) - aproc.start() -@@ -257,14 +257,14 @@ class RemoteFactory: - return RemoteExec(RemoteFactory.rsh, silent) - - def enable_docker(self): -- print "Using DOCKER backend for connections to cluster nodes" -+ print("Using DOCKER backend for connections to cluster nodes") - - RemoteFactory.rsh.Command = "/usr/libexec/phd/docker/phd_docker_remote_cmd " - RemoteFactory.rsh.CpCommand = "/usr/libexec/phd/docker/phd_docker_cp" - - def enable_qarsh(self): - # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/ -- print "Using QARSH for connections to cluster nodes" -+ print("Using QARSH for connections to cluster nodes") - - RemoteFactory.rsh.Command = "qarsh -t 300 -l root" - RemoteFactory.rsh.CpCommand = "qacp -q" -diff --git a/cts/watcher.py b/cts/watcher.py -index 1182c8b..de032f7 100644 ---- a/cts/watcher.py -+++ b/cts/watcher.py -@@ -73,7 +73,7 @@ for i in range(0, len(args)): - skipthis=1 - - if not os.access(filename, os.R_OK): -- print prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0) -+ print(prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)) - sys.exit(1) - - logfile=open(filename, 'r') -@@ -85,7 +85,7 @@ if offset != 'EOF': - if newsize >= offset: - logfile.seek(offset) - else: -- print prefix + ('File truncated from %d to %d' % (offset, newsize)) -+ print(prefix + ('File truncated from %d to %d' % (offset, newsize))) - if (newsize*1.05) < offset: - logfile.seek(0) - # else: we probably just lost a few logs after a fencing op -@@ -103,10 +103,10 @@ while True: - line = logfile.readline() - if not line: break - -- print line.strip() -+ print(line.strip()) - count += 1 - --print prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count) -+print(prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)) - logfile.close() - """ - -@@ -158,7 +158,7 @@ class FileObj(SearchObj): - SearchObj.__init__(self, filename, host, name) - - if host is not None: -- if 
not has_log_watcher.has_key(host): -+ if not host in has_log_watcher: - - global log_watcher - global log_watcher_bin -@@ -381,7 +381,7 @@ class LogWatcher(RemoteExec): - else: - self.file_list.append(FileObj(self.filename)) - -- # print "%s now has %d files" % (self.name, len(self.file_list)) -+ # print("%s now has %d files" % (self.name, len(self.file_list))) - - def __del__(self): - if self.debug_level > 1: self.debug("Destroy") -@@ -406,7 +406,7 @@ class LogWatcher(RemoteExec): - raise ValueError("No sources to read from") - - pending = [] -- #print "%s waiting for %d operations" % (self.name, self.pending) -+ #print("%s waiting for %d operations" % (self.name, self.pending)) - for f in self.file_list: - t = f.harvest_async(self) - if t: -@@ -418,7 +418,7 @@ class LogWatcher(RemoteExec): - self.logger.log("%s: Aborting after 20s waiting for %s logging commands" % (self.name, repr(t))) - return - -- #print "Got %d lines" % len(self.line_cache) -+ #print("Got %d lines" % len(self.line_cache)) - - def end(self): - for f in self.file_list: -diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt -index 5d5fa33..b0115fb 100644 ---- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt -+++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt -@@ -643,6 +643,16 @@ indexterm:[Action,Property,on-fail] - indexterm:[enabled,Action Property] - indexterm:[Action,Property,enabled] - -+|role -+| -+|This option only makes sense for recurring operations. It restricts -+ the operation to a specific role. The truly paranoid can even -+ specify +role=Stopped+ which allows the cluster to detect an admin -+ that manually started cluster services. -+ Allowed values: +Stopped+, +Started+, +Slave+, +Master+. -+ indexterm:[role,Action Property] -+ indexterm:[Action,Property,role] -+ - |========================================================= - - [[s-operation-defaults]] -diff --git a/fencing/commands.c b/fencing/commands.c -index 0d2d614..bd3b27d 100644 ---- a/fencing/commands.c -+++ b/fencing/commands.c -@@ -124,17 +124,7 @@ static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char - static gboolean - is_action_required(const char *action, stonith_device_t *device) - { -- if(device == NULL) { -- return FALSE; -- -- } else if (device->required_actions == NULL) { -- return FALSE; -- -- } else if (strstr(device->required_actions, action)) { -- return TRUE; -- } -- -- return FALSE; -+ return device && device->automatic_unfencing && safe_str_eq(action, "on"); - } - - static int -@@ -449,7 +439,6 @@ free_device(gpointer data) - free_xml(device->agent_metadata); - free(device->namespace); - free(device->on_target_actions); -- free(device->required_actions); - free(device->agent); - free(device->id); - free(device); -@@ -713,8 +702,6 @@ read_action_metadata(stonith_device_t *device) - for (lpc = 0; lpc < max; lpc++) { - const char *on_target = NULL; - const char *action = NULL; -- const char *automatic = NULL; -- const char *required = NULL; - xmlNode *match = getXpathResult(xpath, lpc); - - CRM_LOG_ASSERT(match != NULL); -@@ -722,8 +709,6 @@ read_action_metadata(stonith_device_t *device) - - on_target = crm_element_value(match, "on_target"); - action = crm_element_value(match, "name"); -- automatic = crm_element_value(match, "automatic"); -- required = crm_element_value(match, "required"); - - if(safe_str_eq(action, "list")) { - set_bit(device->flags, st_device_supports_list); -@@ -731,17 +716,21 @@ read_action_metadata(stonith_device_t *device) -
set_bit(device->flags, st_device_supports_status); - } else if(safe_str_eq(action, "reboot")) { - set_bit(device->flags, st_device_supports_reboot); -- } else if(safe_str_eq(action, "on") && (crm_is_true(automatic))) { -- /* this setting implies required=true for unfencing */ -- required = "true"; -+ } else if (safe_str_eq(action, "on")) { -+ /* "automatic" means the cluster will unfence node when it joins */ -+ const char *automatic = crm_element_value(match, "automatic"); -+ -+ /* "required" is a deprecated synonym for "automatic" */ -+ const char *required = crm_element_value(match, "required"); -+ -+ if (crm_is_true(automatic) || crm_is_true(required)) { -+ device->automatic_unfencing = TRUE; -+ } - } - - if (action && crm_is_true(on_target)) { - device->on_target_actions = add_action(device->on_target_actions, action); - } -- if (action && crm_is_true(required)) { -- device->required_actions = add_action(device->required_actions, action); -- } - } - - freeXpathObject(xpath); -@@ -778,8 +767,7 @@ build_device_from_xml(xmlNode * msg) - - value = crm_element_value(dev, "rsc_provides"); - if (safe_str_eq(value, "unfencing")) { -- /* if this agent requires unfencing, 'on' is considered a required action */ -- device->required_actions = add_action(device->required_actions, "on"); -+ device->automatic_unfencing = TRUE; - } - - if (is_action_required("on", device)) { -@@ -1224,7 +1212,6 @@ stonith_device_action(xmlNode * msg, char **output) - } else if (device) { - cmd = create_async_command(msg); - if (cmd == NULL) { -- free_device(device); - return -EPROTO; - } - -diff --git a/fencing/internal.h b/fencing/internal.h -index 5fb8f9c..0f418ec 100644 ---- a/fencing/internal.h -+++ b/fencing/internal.h -@@ -26,12 +26,13 @@ typedef struct stonith_device_s { - - /*! list of actions that must execute on the target node. Used for unfencing */ - char *on_target_actions; -- char *required_actions; - GListPtr targets; - time_t targets_age; - gboolean has_attr_map; - /* should nodeid parameter for victim be included in agent arguments */ - gboolean include_nodeid; -+ /* whether the cluster should automatically unfence nodes with the device */ -+ gboolean automatic_unfencing; - guint priority; - guint active_pid; - -@@ -59,7 +60,8 @@ typedef struct stonith_device_s { - enum st_remap_phase { - st_phase_requested = 0, - st_phase_off = 1, -- st_phase_on = 2 -+ st_phase_on = 2, -+ st_phase_max = 3 - }; - - typedef struct remote_fencing_op_s { -@@ -128,15 +130,9 @@ typedef struct remote_fencing_op_s { - /*! The current operation phase being executed */ - enum st_remap_phase phase; - -- /* For phase 0 or 1 (requested action or a remapped "off"), required devices -- * will be executed regardless of what topology level is being executed -- * currently. For phase 1 (remapped "on"), required devices will not be -- * attempted, because the cluster will execute them automatically when the -- * node next joins the cluster. -- */ -- /*! Lists of devices marked as required for each phase */ -- GListPtr required_list[3]; -- /*! The device list of all the devices at the current executing topology level. */ -+ /*! Devices with automatic unfencing (always run if "on" requested, never if remapped) */ -+ GListPtr automatic_list; -+ /*! List of all devices at the currently executing topology level */ - GListPtr devices_list; - /*! 
Current entry in the topology device list */ - GListPtr devices; -diff --git a/fencing/main.c b/fencing/main.c -index 46d7352..c48e12d 100644 ---- a/fencing/main.c -+++ b/fencing/main.c -@@ -553,7 +553,7 @@ remove_fencing_topology(xmlXPathObjectPtr xpathObj) - } - - static void --register_fencing_topology(xmlXPathObjectPtr xpathObj, gboolean force) -+register_fencing_topology(xmlXPathObjectPtr xpathObj) - { - int max = numXpathResults(xpathObj), lpc = 0; - -@@ -584,7 +584,7 @@ register_fencing_topology(xmlXPathObjectPtr xpathObj, gboolean force) - */ - - static void --fencing_topology_init(xmlNode * msg) -+fencing_topology_init() - { - xmlXPathObjectPtr xpathObj = NULL; - const char *xpath = "//" XML_TAG_FENCING_LEVEL; -@@ -598,7 +598,7 @@ fencing_topology_init(xmlNode * msg) - - /* Grab everything */ - xpathObj = xpath_search(local_cib, xpath); -- register_fencing_topology(xpathObj, TRUE); -+ register_fencing_topology(xpathObj); - - freeXpathObject(xpathObj); - } -@@ -931,7 +931,7 @@ update_fencing_topology(const char *event, xmlNode * msg) - xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL; - xpathObj = xpath_search(msg, xpath); - -- register_fencing_topology(xpathObj, FALSE); -+ register_fencing_topology(xpathObj); - freeXpathObject(xpathObj); - - } else if(format == 2) { -@@ -969,7 +969,7 @@ update_fencing_topology(const char *event, xmlNode * msg) - /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */ - crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s", - op, add[0], add[1], add[2], xpath); -- fencing_topology_init(NULL); -+ fencing_topology_init(); - return; - } - -@@ -977,7 +977,7 @@ update_fencing_topology(const char *event, xmlNode * msg) - /* Change to the topology in general */ - crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s", - op, add[0], add[1], add[2], xpath); -- fencing_topology_init(NULL); -+ fencing_topology_init(); - return; - - } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) { -@@ -989,7 +989,7 @@ update_fencing_topology(const char *event, xmlNode * msg) - } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) { - crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.", - op, add[0], add[1], add[2], xpath); -- fencing_topology_init(NULL); -+ fencing_topology_init(); - return; - } - -@@ -1098,7 +1098,7 @@ update_cib_cache_cb(const char *event, xmlNode * msg) - } else if (stonith_enabled_saved == FALSE) { - crm_info("Updating stonith device and topology lists now that stonith is enabled"); - stonith_enabled_saved = TRUE; -- fencing_topology_init(NULL); -+ fencing_topology_init(); - cib_devices_update(); - - } else { -@@ -1114,7 +1114,7 @@ init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *us - have_cib_devices = TRUE; - local_cib = copy_xml(output); - -- fencing_topology_init(msg); -+ fencing_topology_init(); - cib_devices_update(); - } - -@@ -1239,7 +1239,7 @@ st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void - * This is a hack until we can send to a nodeid and/or we fix node name lookups - * These messages are ignored in stonith_peer_callback() - */ -- xmlNode *query = query = create_xml_node(NULL, "stonith_command"); -+ xmlNode *query = create_xml_node(NULL, "stonith_command"); - - crm_xml_add(query, F_XML_TAGNAME, "stonith_command"); - crm_xml_add(query, F_TYPE, T_STONITH_NG); -diff --git a/fencing/remote.c 
b/fencing/remote.c -index 2c00b5f..d741672 100644 ---- a/fencing/remote.c -+++ b/fencing/remote.c -@@ -60,13 +60,13 @@ typedef struct device_properties_s { - /* The remaining members are indexed by the operation's "phase" */ - - /* Whether this device has been executed in each phase */ -- gboolean executed[3]; -+ gboolean executed[st_phase_max]; - /* Whether this device is disallowed from executing in each phase */ -- gboolean disallowed[3]; -+ gboolean disallowed[st_phase_max]; - /* Action-specific timeout for each phase */ -- int custom_action_timeout[3]; -+ int custom_action_timeout[st_phase_max]; - /* Action-specific maximum random delay for each phase */ -- int delay_max[3]; -+ int delay_max[st_phase_max]; - } device_properties_t; - - typedef struct st_query_result_s { -@@ -207,22 +207,6 @@ grab_peer_device(const remote_fencing_op_t *op, st_query_result_t *peer, - return TRUE; - } - --/* -- * \internal -- * \brief Free the list of required devices for a particular phase -- * -- * \param[in,out] op Operation to modify -- * \param[in] phase Phase to modify -- */ --static void --free_required_list(remote_fencing_op_t *op, enum st_remap_phase phase) --{ -- if (op->required_list[phase]) { -- g_list_free_full(op->required_list[phase], free); -- op->required_list[phase] = NULL; -- } --} -- - static void - clear_remote_op_timers(remote_fencing_op_t * op) - { -@@ -268,9 +252,7 @@ free_remote_op(gpointer data) - g_list_free_full(op->devices_list, free); - op->devices_list = NULL; - } -- free_required_list(op, st_phase_requested); -- free_required_list(op, st_phase_off); -- free_required_list(op, st_phase_on); -+ g_list_free_full(op->automatic_list, free); - free(op); - } - -@@ -323,10 +305,10 @@ op_phase_on(remote_fencing_op_t *op) - op->phase = st_phase_on; - strcpy(op->action, "on"); - -- /* Any devices that are required for "on" will be automatically executed by -- * the cluster when the node next joins, so we skip them here. -+ /* Skip devices with automatic unfencing, because the cluster will handle it -+ * when the node rejoins. - */ -- for (iter = op->required_list[op->phase]; iter != NULL; iter = iter->next) { -+ for (iter = op->automatic_list; iter != NULL; iter = iter->next) { - GListPtr match = g_list_find_custom(op->devices_list, iter->data, - sort_strings); - -@@ -334,12 +316,8 @@ op_phase_on(remote_fencing_op_t *op) - op->devices_list = g_list_remove(op->devices_list, match->data); - } - } -- -- /* We know this level will succeed, because phase 1 completed successfully -- * and we ignore any errors from phase 2. So we can free the required list, -- * which will keep them from being executed after the device list is done. 
-- */ -- free_required_list(op, op->phase); -+ g_list_free_full(op->automatic_list, free); -+ op->automatic_list = NULL; - - /* Rewind device list pointer */ - op->devices = op->devices_list; -@@ -659,28 +637,25 @@ topology_is_empty(stonith_topology_t *tp) - - /* - * \internal -- * \brief Add a device to the required list for a particular phase -+ * \brief Add a device to an operation's automatic unfencing list - * - * \param[in,out] op Operation to modify -- * \param[in] phase Phase to modify - * \param[in] device Device ID to add - */ - static void --add_required_device(remote_fencing_op_t *op, enum st_remap_phase phase, -- const char *device) -+add_required_device(remote_fencing_op_t *op, const char *device) - { -- GListPtr match = g_list_find_custom(op->required_list[phase], device, -+ GListPtr match = g_list_find_custom(op->automatic_list, device, - sort_strings); - - if (!match) { -- op->required_list[phase] = g_list_prepend(op->required_list[phase], -- strdup(device)); -+ op->automatic_list = g_list_prepend(op->automatic_list, strdup(device)); - } - } - - /* - * \internal -- * \brief Remove a device from the required list for the current phase -+ * \brief Remove a device from the automatic unfencing list - * - * \param[in,out] op Operation to modify - * \param[in] device Device ID to remove -@@ -688,12 +663,11 @@ add_required_device(remote_fencing_op_t *op, enum st_remap_phase phase, - static void - remove_required_device(remote_fencing_op_t *op, const char *device) - { -- GListPtr match = g_list_find_custom(op->required_list[op->phase], device, -+ GListPtr match = g_list_find_custom(op->automatic_list, device, - sort_strings); - - if (match) { -- op->required_list[op->phase] = g_list_remove(op->required_list[op->phase], -- match->data); -+ op->automatic_list = g_list_remove(op->automatic_list, match->data); - } - } - -@@ -938,7 +912,7 @@ create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer) - - op = calloc(1, sizeof(remote_fencing_op_t)); - -- crm_element_value_int(request, F_STONITH_TIMEOUT, (int *)&(op->base_timeout)); -+ crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout)); - - if (peer && dev) { - op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID); -@@ -974,7 +948,7 @@ create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer) - crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options); - op->call_options = call_options; - -- crm_element_value_int(request, F_STONITH_CALLID, (int *)&(op->client_callid)); -+ crm_element_value_int(request, F_STONITH_CALLID, &(op->client_callid)); - - crm_trace("%s new stonith op: %s - %s of %s for %s", - (peer -@@ -1352,14 +1326,17 @@ advance_op_topology(remote_fencing_op_t *op, const char *device, xmlNode *msg, - op->devices = op->devices->next; - } - -- /* If this device was required, it's not anymore */ -- remove_required_device(op, device); -+ /* Handle automatic unfencing if an "on" action was requested */ -+ if ((op->phase == st_phase_requested) && safe_str_eq(op->action, "on")) { -+ /* If the device we just executed was required, it's not anymore */ -+ remove_required_device(op, device); - -- /* If there are no more devices at this topology level, -- * run through any required devices not already executed -- */ -- if (op->devices == NULL) { -- op->devices = op->required_list[op->phase]; -+ /* If there are no more devices at this topology level, run through any -+ * remaining devices with automatic unfencing -+ */ -+ if (op->devices == NULL) { -+ op->devices = 
op->automatic_list; -+ } - } - - if ((op->devices == NULL) && (op->phase == st_phase_off)) { -@@ -1613,8 +1590,6 @@ parse_action_specific(xmlNode *xml, const char *peer, const char *device, - const char *action, remote_fencing_op_t *op, - enum st_remap_phase phase, device_properties_t *props) - { -- int required; -- - props->custom_action_timeout[phase] = 0; - crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT, - &props->custom_action_timeout[phase]); -@@ -1630,20 +1605,16 @@ parse_action_specific(xmlNode *xml, const char *peer, const char *device, - peer, device, props->delay_max[phase], action); - } - -- required = 0; -- crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required); -- if (required) { -- /* If the action is marked as required, add the device to the -- * operation's list of required devices for this phase. We use this -- * for unfencing when executing a topology. In phase 0 (requested -- * action) or phase 1 (remapped "off"), required devices get executed -- * regardless of their topology level; in phase 2 (remapped "on"), -- * required devices are not attempted, because the cluster will -- * execute them automatically later. -- */ -- crm_trace("Peer %s requires device %s to execute for action %s", -- peer, device, action); -- add_required_device(op, phase, device); -+ /* Handle devices with automatic unfencing */ -+ if (safe_str_eq(action, "on")) { -+ int required = 0; -+ -+ crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required); -+ if (required) { -+ crm_trace("Peer %s requires device %s to execute for action %s", -+ peer, device, action); -+ add_required_device(op, device); -+ } - } - - /* If a reboot is remapped to off+on, it's possible that a node is allowed -diff --git a/include/crm/cib.h b/include/crm/cib.h -index cb465bf..306706e 100644 ---- a/include/crm/cib.h -+++ b/include/crm/cib.h -@@ -136,6 +136,13 @@ typedef struct cib_api_operations_s { - void *user_data, const char *callback_name, - void (*callback) (xmlNode *, int, int, xmlNode *, void *)); - -+ gboolean (*register_callback_full)(cib_t *cib, int call_id, int timeout, -+ gboolean only_success, void *user_data, -+ const char *callback_name, -+ void (*callback)(xmlNode *, int, int, -+ xmlNode *, void *), -+ void (*free_func)(void *)); -+ - } cib_api_operations_t; - - struct cib_s { -diff --git a/include/crm/cib/internal.h b/include/crm/cib/internal.h -index 431a2bd..adc2faf 100644 ---- a/include/crm/cib/internal.h -+++ b/include/crm/cib/internal.h -@@ -106,7 +106,7 @@ typedef struct cib_callback_client_s { - void *user_data; - gboolean only_success; - struct timer_rec_s *timer; -- -+ void (*free_func)(void *); - } cib_callback_client_t; - - struct timer_rec_s { -@@ -137,6 +137,13 @@ int cib_native_register_notification(cib_t * cib, const char *callback, int enab - gboolean cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean only_success, - void *user_data, const char *callback_name, - void (*callback) (xmlNode *, int, int, xmlNode *, void *)); -+gboolean cib_client_register_callback_full(cib_t *cib, int call_id, -+ int timeout, gboolean only_success, -+ void *user_data, -+ const char *callback_name, -+ void (*callback)(xmlNode *, int, int, -+ xmlNode *, void *), -+ void (*free_func)(void *)); - - int cib_process_query(const char *op, int options, const char *section, xmlNode * req, - xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib, -diff --git a/include/crm/common/ipc.h b/include/crm/common/ipc.h -index db83b09..d6ceda2 100644 ---- 
a/include/crm/common/ipc.h -+++ b/include/crm/common/ipc.h -@@ -75,7 +75,7 @@ long crm_ipc_read(crm_ipc_t * client); - const char *crm_ipc_buffer(crm_ipc_t * client); - uint32_t crm_ipc_buffer_flags(crm_ipc_t * client); - const char *crm_ipc_name(crm_ipc_t * client); --int crm_ipc_default_buffer_size(void); -+unsigned int crm_ipc_default_buffer_size(void); - - /* Utils */ - xmlNode *create_hello_message(const char *uuid, const char *client_name, -diff --git a/include/crm/common/ipcs.h b/include/crm/common/ipcs.h -index b43fc53..d825912 100644 ---- a/include/crm/common/ipcs.h -+++ b/include/crm/common/ipcs.h -@@ -110,7 +110,7 @@ void crm_ipcs_send_ack(crm_client_t * c, uint32_t request, uint32_t flags, - const char *tag, const char *function, int line); - - /* when max_send_size is 0, default ipc buffer size is used */ --ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec **result, int32_t max_send_size); -+ssize_t crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size); - ssize_t crm_ipcs_send(crm_client_t * c, uint32_t request, xmlNode * message, enum crm_ipc_flags flags); - ssize_t crm_ipcs_sendv(crm_client_t * c, struct iovec *iov, enum crm_ipc_flags flags); - xmlNode *crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t * flags); -diff --git a/lib/cib/cib_client.c b/lib/cib/cib_client.c -index b13323e..f7a19b8 100644 ---- a/lib/cib/cib_client.c -+++ b/lib/cib/cib_client.c -@@ -198,6 +198,11 @@ cib_destroy_op_callback(gpointer data) - g_source_remove(blob->timer->ref); - } - free(blob->timer); -+ -+ if (blob->user_data && blob->free_func) { -+ blob->free_func(blob->user_data); -+ } -+ - free(blob); - } - -@@ -327,10 +332,15 @@ cib_new(void) - return cib_native_new(); - } - --/* this is backwards... -- cib_*_new should call this not the other way around -+/* -+ * \internal -+ * \brief Create a generic CIB connection instance -+ * -+ * \return Newly allocated and initialized cib_t instance -+ * -+ * \note This is called by each variant's cib_*_new() function before setting -+ * variant-specific values. 
- */ -- - cib_t * - cib_new_variant(void) - { -@@ -364,6 +374,7 @@ cib_new_variant(void) - new_cib->cmds->add_notify_callback = cib_client_add_notify_callback; - new_cib->cmds->del_notify_callback = cib_client_del_notify_callback; - new_cib->cmds->register_callback = cib_client_register_callback; -+ new_cib->cmds->register_callback_full = cib_client_register_callback_full; - - new_cib->cmds->noop = cib_client_noop; - new_cib->cmds->ping = cib_client_ping; -@@ -545,6 +556,19 @@ cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean onl - void *user_data, const char *callback_name, - void (*callback) (xmlNode *, int, int, xmlNode *, void *)) - { -+ return cib_client_register_callback_full(cib, call_id, timeout, -+ only_success, user_data, -+ callback_name, callback, NULL); -+} -+ -+gboolean -+cib_client_register_callback_full(cib_t *cib, int call_id, int timeout, -+ gboolean only_success, void *user_data, -+ const char *callback_name, -+ void (*callback)(xmlNode *, int, int, -+ xmlNode *, void *), -+ void (*free_func)(void *)) -+{ - cib_callback_client_t *blob = NULL; - - if (call_id < 0) { -@@ -553,6 +577,9 @@ cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean onl - } else { - crm_warn("CIB call failed: %s", pcmk_strerror(call_id)); - } -+ if (user_data && free_func) { -+ free_func(user_data); -+ } - return FALSE; - } - -@@ -561,6 +588,7 @@ cib_client_register_callback(cib_t * cib, int call_id, int timeout, gboolean onl - blob->only_success = only_success; - blob->user_data = user_data; - blob->callback = callback; -+ blob->free_func = free_func; - - if (timeout > 0) { - struct timer_rec_s *async_timer = NULL; -diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c -index d321517..4dc65aa 100644 ---- a/lib/cib/cib_utils.c -+++ b/lib/cib/cib_utils.c -@@ -624,12 +624,6 @@ cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc) - { - xmlNode *output = NULL; - cib_callback_client_t *blob = NULL; -- cib_callback_client_t local_blob; -- -- local_blob.id = NULL; -- local_blob.callback = NULL; -- local_blob.user_data = NULL; -- local_blob.only_success = FALSE; - - if (msg != NULL) { - crm_element_value_int(msg, F_CIB_RC, &rc); -@@ -638,16 +632,8 @@ cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc) - } - - blob = g_hash_table_lookup(cib_op_callback_table, GINT_TO_POINTER(call_id)); -- -- if (blob != NULL) { -- local_blob = *blob; -- blob = NULL; -- -- remove_cib_op_callback(call_id, FALSE); -- -- } else { -+ if (blob == NULL) { - crm_trace("No callback found for call %d", call_id); -- local_blob.callback = NULL; - } - - if (cib == NULL) { -@@ -659,15 +645,20 @@ cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc) - rc = pcmk_ok; - } - -- if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) { -- crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id); -- local_blob.callback(msg, call_id, rc, output, local_blob.user_data); -+ if (blob && blob->callback && (rc == pcmk_ok || blob->only_success == FALSE)) { -+ crm_trace("Invoking callback %s for call %d", crm_str(blob->id), call_id); -+ blob->callback(msg, call_id, rc, output, blob->user_data); - - } else if (cib && cib->op_callback == NULL && rc != pcmk_ok) { - crm_warn("CIB command failed: %s", pcmk_strerror(rc)); - crm_log_xml_debug(msg, "Failed CIB Update"); - } - -+ /* This may free user_data, so do it after the callback */ -+ if (blob) { -+ remove_cib_op_callback(call_id, FALSE); -+ } -+ - if (cib 
&& cib->op_callback != NULL) { - crm_trace("Invoking global callback for call %d", call_id); - cib->op_callback(msg, call_id, rc, output); -diff --git a/lib/cluster/legacy.c b/lib/cluster/legacy.c -index d93613d..e9905f6 100644 ---- a/lib/cluster/legacy.c -+++ b/lib/cluster/legacy.c -@@ -52,6 +52,21 @@ void *ais_ipc_ctx = NULL; - - hdb_handle_t ais_ipc_handle = 0; - -+static bool valid_cman_name(const char *name, uint32_t nodeid) -+{ -+ bool rc = TRUE; -+ -+ /* Yes, %d, because that's what CMAN does */ -+ char *fakename = crm_strdup_printf("Node%d", nodeid); -+ -+ if(crm_str_eq(fakename, name, TRUE)) { -+ rc = FALSE; -+ crm_notice("Ignoring inferred name from cman: %s", fakename); -+ } -+ free(fakename); -+ return rc; -+} -+ - static gboolean - plugin_get_details(uint32_t * id, char **uname) - { -@@ -361,6 +376,7 @@ cman_event_callback(cman_handle_t handle, void *privdata, int reason, int arg) - arg ? "retained" : "still lost"); - } - -+ memset(cman_nodes, 0, MAX_NODES * sizeof(cman_node_t)); - rc = cman_get_nodes(pcmk_cman_handle, MAX_NODES, &node_count, cman_nodes); - if (rc < 0) { - crm_err("Couldn't query cman node list: %d %d", rc, errno); -@@ -369,6 +385,7 @@ cman_event_callback(cman_handle_t handle, void *privdata, int reason, int arg) - - for (lpc = 0; lpc < node_count; lpc++) { - crm_node_t *peer = NULL; -+ const char *name = NULL; - - if (cman_nodes[lpc].cn_nodeid == 0) { - /* Never allow node ID 0 to be considered a member #315711 */ -@@ -376,7 +393,11 @@ cman_event_callback(cman_handle_t handle, void *privdata, int reason, int arg) - continue; - } - -- peer = crm_get_peer(cman_nodes[lpc].cn_nodeid, cman_nodes[lpc].cn_name); -+ if(valid_cman_name(cman_nodes[lpc].cn_name, cman_nodes[lpc].cn_nodeid)) { -+ name = cman_nodes[lpc].cn_name; -+ } -+ -+ peer = crm_get_peer(cman_nodes[lpc].cn_nodeid, name); - if(cman_nodes[lpc].cn_member) { - crm_update_peer_state(__FUNCTION__, peer, CRM_NODE_MEMBER, crm_peer_seq); - -@@ -631,15 +652,17 @@ cman_node_name(uint32_t nodeid) - - cman = cman_init(NULL); - if (cman != NULL && cman_is_active(cman)) { -- us.cn_name[0] = 0; -+ -+ memset(&us, 0, sizeof(cman_node_t)); - cman_get_node(cman, nodeid, &us); -- name = strdup(us.cn_name); -- crm_info("Using CMAN node name %s for %u", name, nodeid); -- } -+ if(valid_cman_name(us.cn_name, nodeid)) { -+ name = strdup(us.cn_name); -+ crm_info("Using CMAN node name %s for %u", name, nodeid); -+ } -+ } - - cman_finish(cman); - # endif -- - if (name == NULL) { - crm_debug("Unable to get node name for nodeid %u", nodeid); - } -@@ -667,7 +690,6 @@ init_cs_connection_once(crm_cluster_t * cluster) - if (cluster_connect_cpg(cluster) == FALSE) { - return FALSE; - } -- cluster->uname = cman_node_name(0 /* CMAN_NODEID_US */ ); - break; - case pcmk_cluster_heartbeat: - crm_info("Could not find an active corosync based cluster"); -diff --git a/lib/common/ipc.c b/lib/common/ipc.c -index d71c54a..f4188ed 100644 ---- a/lib/common/ipc.c -+++ b/lib/common/ipc.c -@@ -46,8 +46,8 @@ struct crm_ipc_response_header { - }; - - static int hdr_offset = 0; --static int ipc_buffer_max = 0; --static unsigned int pick_ipc_buffer(int max); -+static unsigned int ipc_buffer_max = 0; -+static unsigned int pick_ipc_buffer(unsigned int max); - - static inline void - crm_ipc_init(void) -@@ -60,7 +60,7 @@ crm_ipc_init(void) - } - } - --int -+unsigned int - crm_ipc_default_buffer_size(void) - { - return pick_ipc_buffer(0); -@@ -91,7 +91,7 @@ generateReference(const char *custom1, const char *custom2) - since_epoch = calloc(1, reference_len); - - 
if (since_epoch != NULL) { -- sprintf(since_epoch, "%s-%s-%ld-%u", -+ sprintf(since_epoch, "%s-%s-%lu-%u", - local_cust1, local_cust2, (unsigned long)time(NULL), ref_counter++); - } - -@@ -431,7 +431,7 @@ crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t - unsigned int size_u = 1 + header->size_uncompressed; - uncompressed = calloc(1, size_u); - -- crm_trace("Decompressing message data %d bytes into %d bytes", -+ crm_trace("Decompressing message data %u bytes into %u bytes", - header->size_compressed, size_u); - - rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0); -@@ -531,9 +531,9 @@ crm_ipcs_flush_events(crm_client_t * c) - } - - ssize_t --crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, int32_t max_send_size) -+crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, uint32_t max_send_size) - { -- static int biggest = 0; -+ static unsigned int biggest = 0; - struct iovec *iov; - unsigned int total = 0; - char *compressed = NULL; -@@ -579,20 +579,18 @@ crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result, int - - free(buffer); - -- if (header->size_compressed > biggest) { -- biggest = 2 * QB_MAX(header->size_compressed, biggest); -- } -+ biggest = QB_MAX(header->size_compressed, biggest); - - } else { - ssize_t rc = -EMSGSIZE; - - crm_log_xml_trace(message, "EMSGSIZE"); -- biggest = 2 * QB_MAX(header->size_uncompressed, biggest); -+ biggest = QB_MAX(header->size_uncompressed, biggest); - - crm_err -- ("Could not compress the message into less than the configured ipc limit (%d bytes)." -- "Set PCMK_ipc_buffer to a higher value (%d bytes suggested)", max_send_size, -- biggest); -+ ("Could not compress the message (%u bytes) into less than the configured ipc limit (%u bytes). 
" -+ "Set PCMK_ipc_buffer to a higher value (%u bytes suggested)", -+ header->size_uncompressed, max_send_size, 4 * biggest); - - free(compressed); - free(buffer); -@@ -656,7 +654,7 @@ crm_ipcs_sendv(crm_client_t * c, struct iovec * iov, enum crm_ipc_flags flags) - - rc = qb_ipcs_response_sendv(c->ipcs, iov, 2); - if (rc < header->qb.size) { -- crm_notice("Response %d to %p[%d] (%d bytes) failed: %s (%d)", -+ crm_notice("Response %d to %p[%d] (%u bytes) failed: %s (%d)", - header->qb.id, c->ipcs, c->pid, header->qb.size, pcmk_strerror(rc), rc); - - } else { -@@ -747,9 +745,9 @@ struct crm_ipc_s { - }; - - static unsigned int --pick_ipc_buffer(int max) -+pick_ipc_buffer(unsigned int max) - { -- static int global_max = 0; -+ static unsigned int global_max = 0; - - if(global_max == 0) { - const char *env = getenv("PCMK_ipc_buffer"); -@@ -925,7 +923,7 @@ crm_ipc_decompress(crm_ipc_t * client) - unsigned int new_buf_size = QB_MAX((hdr_offset + size_u), client->max_buf_size); - char *uncompressed = calloc(1, new_buf_size); - -- crm_trace("Decompressing message data %d bytes into %d bytes", -+ crm_trace("Decompressing message data %u bytes into %u bytes", - header->size_compressed, size_u); - - rc = BZ2_bzBuffToBuffDecompress(uncompressed + hdr_offset, &size_u, -@@ -986,7 +984,7 @@ crm_ipc_read(crm_ipc_t * client) - return -EBADMSG; - } - -- crm_trace("Received %s event %d, size=%d, rc=%d, text: %.100s", -+ crm_trace("Received %s event %d, size=%u, rc=%d, text: %.100s", - client->name, header->qb.id, header->qb.size, client->msg_size, - client->buffer + hdr_offset); - -@@ -1166,9 +1164,9 @@ crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, in - - if(header->size_compressed) { - if(factor < 10 && (client->max_buf_size / 10) < (rc / factor)) { -- crm_notice("Compressed message exceeds %d0%% of the configured ipc limit (%d bytes), " -- "consider setting PCMK_ipc_buffer to %d or higher", -- factor, client->max_buf_size, 2*client->max_buf_size); -+ crm_notice("Compressed message exceeds %d0%% of the configured ipc limit (%u bytes), " -+ "consider setting PCMK_ipc_buffer to %u or higher", -+ factor, client->max_buf_size, 2 * client->max_buf_size); - factor++; - } - } -@@ -1211,7 +1209,7 @@ crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, in - if (rc > 0) { - struct crm_ipc_response_header *hdr = (struct crm_ipc_response_header *)(void*)client->buffer; - -- crm_trace("Received response %d, size=%d, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size, -+ crm_trace("Received response %d, size=%u, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size, - rc, crm_ipc_buffer(client)); - - if (reply) { -diff --git a/lib/common/xml.c b/lib/common/xml.c -index 8eed245..299c7bf 100644 ---- a/lib/common/xml.c -+++ b/lib/common/xml.c -@@ -3821,6 +3821,7 @@ crm_xml_dump(xmlNode * data, int options, char **buffer, int *offset, int *max, - if(data == NULL) { - *offset = 0; - *max = 0; -+ return; - } - #if 0 - if (is_not_set(options, xml_log_option_filtered)) { -@@ -5621,7 +5622,7 @@ update_validation(xmlNode ** xml_blob, int *best, int max, gboolean transform, g - break; - - } else if (known_schemas[lpc].transform == NULL) { -- crm_notice("%s-style configuration is also valid for %s", -+ crm_debug("%s-style configuration is also valid for %s", - known_schemas[lpc].name, known_schemas[next].name); - - if (validate_with(xml, next, to_logs)) { -diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c -index f5e34ee..42bdf2b 100644 ---- a/lib/lrmd/lrmd_client.c -+++ 
b/lib/lrmd/lrmd_client.c -@@ -1369,7 +1369,7 @@ lrmd_api_disconnect(lrmd_t * lrmd) - { - lrmd_private_t *native = lrmd->private; - -- crm_info("Disconnecting from lrmd service"); -+ crm_info("Disconnecting from %d lrmd service", native->type); - switch (native->type) { - case CRM_CLIENT_IPC: - lrmd_ipc_disconnect(lrmd); -diff --git a/lib/services/dbus.c b/lib/services/dbus.c -index e2efecb..d42affe 100644 ---- a/lib/services/dbus.c -+++ b/lib/services/dbus.c -@@ -329,9 +329,6 @@ pcmk_dbus_lookup_cb(DBusPendingCall *pending, void *user_data) - - pcmk_dbus_lookup_result(reply, user_data); - -- if(pending) { -- dbus_pending_call_unref(pending); -- } - if(reply) { - dbus_message_unref(reply); - } -diff --git a/lib/services/services.c b/lib/services/services.c -index 7e2b9f7..3f40078 100644 ---- a/lib/services/services.c -+++ b/lib/services/services.c -@@ -150,6 +150,7 @@ resources_action_create(const char *name, const char *standard, const char *prov - - op = calloc(1, sizeof(svc_action_t)); - op->opaque = calloc(1, sizeof(svc_action_private_t)); -+ op->opaque->pending = NULL; - op->rsc = strdup(name); - op->action = strdup(action); - op->interval = interval; -@@ -158,6 +159,7 @@ resources_action_create(const char *name, const char *standard, const char *prov - op->agent = strdup(agent); - op->sequence = ++operations; - op->flags = flags; -+ - if (asprintf(&op->id, "%s_%s_%d", name, action, interval) == -1) { - goto return_error; - } -@@ -335,6 +337,7 @@ services_action_create_generic(const char *exec, const char *args[]) - - op->opaque->exec = strdup(exec); - op->opaque->args[0] = strdup(exec); -+ op->opaque->pending = NULL; - - for (cur_arg = 1; args && args[cur_arg - 1]; cur_arg++) { - op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]); -@@ -361,17 +364,17 @@ services_set_op_pending(svc_action_t *op, DBusPendingCall *pending) - { - if (op->opaque->pending && (op->opaque->pending != pending)) { - if (pending) { -- crm_info("Lost pending DBus call (%p)", op->opaque->pending); -+ crm_info("Lost pending %s DBus call (%p)", op->id, op->opaque->pending); - } else { -- crm_trace("Done with pending DBus call (%p)", op->opaque->pending); -+ crm_info("Done with pending %s DBus call (%p)", op->id, op->opaque->pending); - } - dbus_pending_call_unref(op->opaque->pending); - } - op->opaque->pending = pending; - if (pending) { -- crm_trace("Updated pending DBus call (%p)", pending); -+ crm_info("Updated pending %s DBus call (%p)", op->id, pending); - } else { -- crm_trace("Cleared pending DBus call"); -+ crm_info("Cleared pending %s DBus call", op->id); - } - } - #endif -@@ -457,7 +460,7 @@ services_action_free(svc_action_t * op) - gboolean - cancel_recurring_action(svc_action_t * op) - { -- crm_info("Cancelling operation %s", op->id); -+ crm_info("Cancelling %s operation %s", op->standard, op->id); - - if (recurring_actions) { - g_hash_table_remove(recurring_actions, op->id); -diff --git a/lib/services/systemd.c b/lib/services/systemd.c -index e1e1bc9..ca56915 100644 ---- a/lib/services/systemd.c -+++ b/lib/services/systemd.c -@@ -189,16 +189,13 @@ systemd_loadunit_cb(DBusPendingCall *pending, void *user_data) - reply = dbus_pending_call_steal_reply(pending); - } - -- if(op) { -- crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action); -- } else { -- crm_trace("Got result: %p for %p", reply, pending); -- } -+ crm_trace("Got result: %p for %p / %p for %s", reply, pending, op->opaque->pending, op->id); -+ -+ CRM_LOG_ASSERT(pending == op->opaque->pending); -+ 
services_set_op_pending(op, NULL); -+ - systemd_loadunit_result(reply, user_data); - -- if(pending) { -- dbus_pending_call_unref(pending); -- } - if(reply) { - dbus_message_unref(reply); - } -@@ -209,6 +206,7 @@ systemd_unit_by_name(const gchar * arg_name, svc_action_t *op) - { - DBusMessage *msg; - DBusMessage *reply = NULL; -+ DBusPendingCall* pending = NULL; - char *name = NULL; - - /* -@@ -249,7 +247,11 @@ systemd_unit_by_name(const gchar * arg_name, svc_action_t *op) - return munit; - } - -- pcmk_dbus_send(msg, systemd_proxy, systemd_loadunit_cb, op, op? op->timeout : DBUS_TIMEOUT_USE_DEFAULT); -+ pending = pcmk_dbus_send(msg, systemd_proxy, systemd_loadunit_cb, op, op->timeout); -+ if(pending) { -+ services_set_op_pending(op, pending); -+ } -+ - dbus_message_unref(msg); - return NULL; - } -@@ -459,23 +461,12 @@ systemd_async_dispatch(DBusPendingCall *pending, void *user_data) - reply = dbus_pending_call_steal_reply(pending); - } - -- if(op) { -- crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action); -- if (pending == op->opaque->pending) { -- op->opaque->pending = NULL; -- } else { -- crm_info("Received unexpected reply for pending DBus call (%p vs %p)", -- op->opaque->pending, pending); -- } -- systemd_exec_result(reply, op); -+ crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action); - -- } else { -- crm_trace("Got result: %p for %p", reply, pending); -- } -+ CRM_LOG_ASSERT(pending == op->opaque->pending); -+ services_set_op_pending(op, NULL); -+ systemd_exec_result(reply, op); - -- if(pending) { -- dbus_pending_call_unref(pending); -- } - if(reply) { - dbus_message_unref(reply); - } -@@ -536,7 +527,6 @@ systemd_unit_exec_with_unit(svc_action_t * op, const char *unit) - free(state); - return op->rc == PCMK_OCF_OK; - } else if (pending) { -- dbus_pending_call_ref(pending); - services_set_op_pending(op, pending); - return TRUE; - } -diff --git a/lib/services/upstart.c b/lib/services/upstart.c -index 31b875b..eb8cfa8 100644 ---- a/lib/services/upstart.c -+++ b/lib/services/upstart.c -@@ -322,10 +322,7 @@ upstart_job_check(const char *name, const char *state, void *userdata) - } - - if (op->synchronous == FALSE) { -- if (op->opaque->pending) { -- dbus_pending_call_unref(op->opaque->pending); -- } -- op->opaque->pending = NULL; -+ services_set_op_pending(op, NULL); - operation_finalize(op); - } - } -@@ -392,6 +389,7 @@ upstart_async_dispatch(DBusPendingCall *pending, void *user_data) - if(pending) { - reply = dbus_pending_call_steal_reply(pending); - } -+ - if(pcmk_dbus_find_error(op->action, pending, reply, &error)) { - - /* ignore "already started" or "not running" errors */ -@@ -419,11 +417,10 @@ upstart_async_dispatch(DBusPendingCall *pending, void *user_data) - } - } - -+ CRM_LOG_ASSERT(pending == op->opaque->pending); -+ services_set_op_pending(op, NULL); - operation_finalize(op); - -- if(pending) { -- dbus_pending_call_unref(pending); -- } - if(reply) { - dbus_message_unref(reply); - } -@@ -483,8 +480,7 @@ upstart_job_exec(svc_action_t * op, gboolean synchronous) - free(state); - return op->rc == PCMK_OCF_OK; - } else if (pending) { -- dbus_pending_call_ref(pending); -- op->opaque->pending = pending; -+ services_set_op_pending(op, pending); - return TRUE; - } - return FALSE; -@@ -527,8 +523,7 @@ upstart_job_exec(svc_action_t * op, gboolean synchronous) - free(job); - - if(pending) { -- dbus_pending_call_ref(pending); -- op->opaque->pending = pending; -+ services_set_op_pending(op, pending); - return TRUE; - } - return FALSE; 
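The services.c, systemd.c and upstart.c hunks above all converge on one idiom: the DBusPendingCall obtained at dispatch time is handed straight to op->opaque->pending, and every release of that reference funnels through the services_set_op_pending() helper instead of ad-hoc dbus_pending_call_ref()/dbus_pending_call_unref() calls at each site. The completion callbacks then assert pending == op->opaque->pending and invoke the helper with NULL, so the reference is dropped exactly once whether the call completes, is replaced, or is abandoned. A minimal self-contained sketch of that ownership pattern, using a stand-in refcounted handle rather than the real libdbus types (all names below are illustrative, not Pacemaker's):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for DBusPendingCall; any refcounted handle behaves the same. */
    typedef struct handle_s { int refs; } handle_t;

    static handle_t *handle_new(void) {
        handle_t *h = calloc(1, sizeof(*h));
        h->refs = 1;                      /* caller owns this reference */
        return h;
    }
    static void handle_unref(handle_t *h) {
        if (--h->refs == 0) { free(h); }
    }

    typedef struct op_s {
        handle_t *pending;                /* the single owning reference */
    } op_t;

    /* Single choke point for ownership, mirroring the role that
     * services_set_op_pending() plays in the patch: drop the old reference
     * (noting when a still-live call is being replaced) and store the new
     * one; NULL means "no call outstanding". */
    static void set_op_pending(op_t *op, handle_t *pending)
    {
        if (op->pending && op->pending != pending) {
            if (pending) {
                printf("lost pending call %p\n", (void *)op->pending);
            }
            handle_unref(op->pending);
        }
        op->pending = pending;
    }

    int main(void)
    {
        op_t op = { NULL };

        set_op_pending(&op, handle_new()); /* async call dispatched */
        set_op_pending(&op, NULL);         /* completion: clear and unref */
        return 0;
    }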
-diff --git a/lrmd/ipc_proxy.c b/lrmd/ipc_proxy.c -index 72d83c4..9427393 100644 ---- a/lrmd/ipc_proxy.c -+++ b/lrmd/ipc_proxy.c -@@ -165,14 +165,14 @@ ipc_proxy_forward_client(crm_client_t *ipc_proxy, xmlNode *xml) - */ - - if (safe_str_eq(msg_type, "event")) { -- crm_info("Sending event to %s", ipc_client->id); -+ crm_trace("Sending event to %s", ipc_client->id); - rc = crm_ipcs_send(ipc_client, 0, msg, crm_ipc_server_event); - - } else if (safe_str_eq(msg_type, "response")) { - int msg_id = 0; - - crm_element_value_int(xml, F_LRMD_IPC_MSG_ID, &msg_id); -- crm_info("Sending response to %d - %s", ipc_client->request_id, ipc_client->id); -+ crm_trace("Sending response to %d - %s", ipc_client->request_id, ipc_client->id); - rc = crm_ipcs_send(ipc_client, msg_id, msg, FALSE); - - CRM_LOG_ASSERT(msg_id == ipc_client->request_id); -diff --git a/lrmd/pacemaker_remote.service.in b/lrmd/pacemaker_remote.service.in -index 7ec42b4..15e61fb 100644 ---- a/lrmd/pacemaker_remote.service.in -+++ b/lrmd/pacemaker_remote.service.in -@@ -9,7 +9,6 @@ WantedBy=multi-user.target - Type=simple - KillMode=process - NotifyAccess=none --SysVStartPriority=99 - EnvironmentFile=-/etc/sysconfig/pacemaker - - ExecStart=@sbindir@/pacemaker_remoted -diff --git a/mcp/pacemaker.service.in b/mcp/pacemaker.service.in -index 2ef9454..9b0a824 100644 ---- a/mcp/pacemaker.service.in -+++ b/mcp/pacemaker.service.in -@@ -20,7 +20,6 @@ WantedBy=multi-user.target - Type=simple - KillMode=process - NotifyAccess=main --SysVStartPriority=99 - EnvironmentFile=-@sysconfdir@/sysconfig/pacemaker - EnvironmentFile=-@sysconfdir@/sysconfig/sbd - SuccessExitStatus=100 -diff --git a/pengine/allocate.c b/pengine/allocate.c -index ec5a18d..c2e56f9 100644 ---- a/pengine/allocate.c -+++ b/pengine/allocate.c -@@ -1495,11 +1495,12 @@ stage6(pe_working_set_t * data_set) - } - } - -- if (last_stonith) { -- order_actions(last_stonith, done, pe_order_implies_then); - -- } else if (dc_fence) { -+ if (dc_fence) { - order_actions(dc_down, done, pe_order_implies_then); -+ -+ } else if (last_stonith) { -+ order_actions(last_stonith, done, pe_order_implies_then); - } - - order_actions(done, all_stopped, pe_order_implies_then); -diff --git a/pengine/test10/rec-node-14.dot b/pengine/test10/rec-node-14.dot -index 395fa89..5ceef92 100644 ---- a/pengine/test10/rec-node-14.dot -+++ b/pengine/test10/rec-node-14.dot -@@ -2,9 +2,9 @@ - "all_stopped" [ style=bold color="green" fontcolor="orange" ] - "stonith 'reboot' node1" -> "stonith 'reboot' node3" [ style = bold] - "stonith 'reboot' node1" [ style=bold color="green" fontcolor="black"] -+"stonith 'reboot' node2" -> "stonith_complete" [ style = bold] - "stonith 'reboot' node2" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' node3" -> "stonith 'reboot' node2" [ style = bold] --"stonith 'reboot' node3" -> "stonith_complete" [ style = bold] - "stonith 'reboot' node3" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] - "stonith_complete" [ style=bold color="green" fontcolor="orange" ] -diff --git a/pengine/test10/rec-node-14.exp b/pengine/test10/rec-node-14.exp -index 58bb5ca..0e5e163 100644 ---- a/pengine/test10/rec-node-14.exp -+++ b/pengine/test10/rec-node-14.exp -@@ -39,7 +39,7 @@ - - - -- -+ - - - -diff --git a/pengine/test10/stonith-0.dot b/pengine/test10/stonith-0.dot -index 29cdd59..8ad32fd 100644 ---- a/pengine/test10/stonith-0.dot -+++ b/pengine/test10/stonith-0.dot -@@ -71,13 +71,13 @@ digraph "g" { - "stonith 'reboot' c001n03" -> 
"ocf_192.168.100.181_stop_0 c001n03" [ style = bold] - "stonith 'reboot' c001n03" -> "ocf_192.168.100.183_stop_0 c001n03" [ style = bold] - "stonith 'reboot' c001n03" -> "rsc_c001n07_stop_0 c001n03" [ style = bold] -+"stonith 'reboot' c001n03" -> "stonith_complete" [ style = bold] - "stonith 'reboot' c001n03" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' c001n05" -> "group-1_stop_0" [ style = bold] - "stonith 'reboot' c001n05" -> "ocf_192.168.100.181_stop_0 c001n05" [ style = bold] - "stonith 'reboot' c001n05" -> "ocf_192.168.100.183_stop_0 c001n05" [ style = bold] - "stonith 'reboot' c001n05" -> "rsc_c001n05_stop_0 c001n05" [ style = bold] - "stonith 'reboot' c001n05" -> "stonith 'reboot' c001n03" [ style = bold] --"stonith 'reboot' c001n05" -> "stonith_complete" [ style = bold] - "stonith 'reboot' c001n05" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] - "stonith_complete" -> "heartbeat_192.168.100.182_start_0 c001n02" [ style = bold] -diff --git a/pengine/test10/stonith-0.exp b/pengine/test10/stonith-0.exp -index 9d47215..a6695c9 100644 ---- a/pengine/test10/stonith-0.exp -+++ b/pengine/test10/stonith-0.exp -@@ -394,7 +394,7 @@ - - - -- -+ - - - -diff --git a/pengine/test10/systemhealth1.dot b/pengine/test10/systemhealth1.dot -index 28841b7..a29f519 100644 ---- a/pengine/test10/systemhealth1.dot -+++ b/pengine/test10/systemhealth1.dot -@@ -1,8 +1,8 @@ - digraph "g" { - "all_stopped" [ style=bold color="green" fontcolor="orange" ] -+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold] --"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] - "stonith_complete" [ style=bold color="green" fontcolor="orange" ] -diff --git a/pengine/test10/systemhealth1.exp b/pengine/test10/systemhealth1.exp -index 80a2329..aa2afe1 100644 ---- a/pengine/test10/systemhealth1.exp -+++ b/pengine/test10/systemhealth1.exp -@@ -27,7 +27,7 @@ - - - -- -+ - - - -diff --git a/pengine/test10/systemhealthm1.dot b/pengine/test10/systemhealthm1.dot -index 28841b7..a29f519 100644 ---- a/pengine/test10/systemhealthm1.dot -+++ b/pengine/test10/systemhealthm1.dot -@@ -1,8 +1,8 @@ - digraph "g" { - "all_stopped" [ style=bold color="green" fontcolor="orange" ] -+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold] --"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] - "stonith_complete" [ style=bold color="green" fontcolor="orange" ] -diff --git a/pengine/test10/systemhealthm1.exp b/pengine/test10/systemhealthm1.exp -index 80a2329..aa2afe1 100644 ---- a/pengine/test10/systemhealthm1.exp -+++ b/pengine/test10/systemhealthm1.exp -@@ -27,7 +27,7 @@ - - - -- -+ - - - -diff --git a/pengine/test10/systemhealthn1.dot b/pengine/test10/systemhealthn1.dot -index 28841b7..a29f519 100644 ---- a/pengine/test10/systemhealthn1.dot -+++ b/pengine/test10/systemhealthn1.dot -@@ -1,8 +1,8 @@ - digraph "g" { - "all_stopped" [ style=bold color="green" fontcolor="orange" ] -+"stonith 'reboot' hs21c" -> 
"stonith_complete" [ style = bold] - "stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold] --"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] - "stonith_complete" [ style=bold color="green" fontcolor="orange" ] -diff --git a/pengine/test10/systemhealthn1.exp b/pengine/test10/systemhealthn1.exp -index 80a2329..aa2afe1 100644 ---- a/pengine/test10/systemhealthn1.exp -+++ b/pengine/test10/systemhealthn1.exp -@@ -27,7 +27,7 @@ - - - -- -+ - - - -diff --git a/pengine/test10/systemhealtho1.dot b/pengine/test10/systemhealtho1.dot -index 28841b7..a29f519 100644 ---- a/pengine/test10/systemhealtho1.dot -+++ b/pengine/test10/systemhealtho1.dot -@@ -1,8 +1,8 @@ - digraph "g" { - "all_stopped" [ style=bold color="green" fontcolor="orange" ] -+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold] --"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] - "stonith_complete" [ style=bold color="green" fontcolor="orange" ] -diff --git a/pengine/test10/systemhealtho1.exp b/pengine/test10/systemhealtho1.exp -index 80a2329..aa2afe1 100644 ---- a/pengine/test10/systemhealtho1.exp -+++ b/pengine/test10/systemhealtho1.exp -@@ -27,7 +27,7 @@ - - - -- -+ - - - -diff --git a/pengine/test10/systemhealthp1.dot b/pengine/test10/systemhealthp1.dot -index 28841b7..a29f519 100644 ---- a/pengine/test10/systemhealthp1.dot -+++ b/pengine/test10/systemhealthp1.dot -@@ -1,8 +1,8 @@ - digraph "g" { - "all_stopped" [ style=bold color="green" fontcolor="orange" ] -+"stonith 'reboot' hs21c" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21c" [ style=bold color="green" fontcolor="black"] - "stonith 'reboot' hs21d" -> "stonith 'reboot' hs21c" [ style = bold] --"stonith 'reboot' hs21d" -> "stonith_complete" [ style = bold] - "stonith 'reboot' hs21d" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] - "stonith_complete" [ style=bold color="green" fontcolor="orange" ] -diff --git a/pengine/test10/systemhealthp1.exp b/pengine/test10/systemhealthp1.exp -index 80a2329..aa2afe1 100644 ---- a/pengine/test10/systemhealthp1.exp -+++ b/pengine/test10/systemhealthp1.exp -@@ -27,7 +27,7 @@ - - - -- -+ - - - -diff --git a/tools/1node2heartbeat b/tools/1node2heartbeat -deleted file mode 100755 -index b63a0c8..0000000 ---- a/tools/1node2heartbeat -+++ /dev/null -@@ -1,326 +0,0 @@ --#!/usr/bin/python --# --# Program to determine current list of enabled services for init state 3 --# and create heartbeat CRM configuration for heartbeat to manage them --# --__copyright__=''' --Author: Alan Robertson --Copyright (C) 2006 International Business Machines --''' -- --# This program is free software; you can redistribute it and/or --# modify it under the terms of the GNU General Public License --# as published by the Free Software Foundation; either version 2 --# of the License, or (at your option) any later version. 
--# --# This program is distributed in the hope that it will be useful, --# but WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --# GNU General Public License for more details. --# --# You should have received a copy of the GNU General Public License --# along with this program; if not, write to the Free Software --# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. --import os,re --# --# Here's the plan: --# Find out the default run level --# Find out what (additional?) services are enabled in that run level --# Figure out which of them start after the network (or heartbeat?) --# Ignore heartbeat :-) --# Figure out which services supply the $services --# Look to see if the SUSE /etc/insserv.conf file exists --# If so, then scan it for who provides the $services --# defined by the LSB --# If we're on Red Hat, then make some Red Hat type assumptions --# (whatever those might be) --# If we're not, then make some generic assumptions... --# Scan the init scripts for their dependencies... --# Eliminate anything at or before 'network'. --# Create resources corresponding to all active services --# Include monitor actions for those services --# that can be started after 'network' --# Add the start-after dependencies --# --# Things to consider doing in the future: --# Constrain them to only run on the local system? --# Put them all in a convenience group (no colocation, no ordering) --# Add start and stop timeouts -- --ServiceKeywords = {} --ServiceMap = {} --ProvidesMap = {} --RequiresMap = {} --SkipMap = {'heartbeat': None, 'random': None} --NoMonitor = {'microcode': None} --PreReqs = ['network'] --IgnoreList = [] --sysname = os.uname()[1] --InitDir = "/etc/init.d" -- --def service_is_hb_compatible(service): -- scriptname = os.path.join(InitDir, service) -- command=scriptname + " status >/dev/null 2>&1"; -- rc = os.system(command) -- return rc == 0 -- --def find_ordered_services(dir): -- allscripts = os.listdir(dir) -- allscripts.sort() -- services = [] -- for entry in allscripts: -- matchobj = re.match("S[0-9]+(.*)", entry) -- if not matchobj: -- continue -- service = matchobj.group(1) -- if SkipMap.has_key(service): -- continue -- if service_is_hb_compatible(service): -- services.append(service) -- else: -- IgnoreList.append(service) -- return services -- -- --def register_services(initdir, services): -- for service in services: -- if not ServiceMap.has_key(service): -- ServiceMap[service] = os.path.join(initdir, service) -- for service in services: -- script_dependency_scan(service, os.path.join(initdir, service), ServiceMap) -- --# --# From the LSB version 3.1: "Comment Conventions for Init Scripts" --# --### BEGIN INIT INFO --### END INIT INFO --# --# The delimiter lines may contain trailing whitespace, which shall be ignored. --# All lines inside the block shall begin with a hash character '#' in the --# first column, so the shell interprets them as comment lines which do not --# affect operation of the script. The lines shall be of the form: --# {keyword}: arg1 [arg2...] --# with exactly one space character between the '#' and the keyword, with a --# single exception. In lines following a line containing the Description --# keyword, and until the next keyword or block ending delimiter is seen, --# a line where the '#' is followed by more than one space or a tab --# character shall be treated as a continuation of the previous line. 
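The block comment above quotes the LSB 3.1 "INIT INFO" keyword convention that the scanner below implements with Python regexes. As a compact, hedged illustration of the same convention (a standalone C sketch, not part of the deleted tool; continuation lines and the Description special case are skipped for brevity):

    #include <stdio.h>
    #include <string.h>

    /* Sketch: print "keyword = value" pairs from an init script's
     * "### BEGIN INIT INFO" block, per the LSB convention quoted above. */
    int main(int argc, char **argv)
    {
        FILE *fd = (argc > 1) ? fopen(argv[1], "r") : stdin;
        char line[1024];
        int in_block = 0;

        if (fd == NULL) {
            return 1;
        }
        while (fgets(line, sizeof(line), fd) != NULL) {
            if (strncmp(line, "### BEGIN INIT INFO", 19) == 0) {
                in_block = 1;
            } else if (strncmp(line, "### END INIT INFO", 17) == 0) {
                break;
            } else if (in_block && line[0] == '#' && line[1] == ' ') {
                char *colon = strchr(line + 2, ':');

                if (colon != NULL) {
                    *colon = '\0';                      /* split at ':'        */
                    colon++;
                    colon += strspn(colon, " \t");      /* skip leading blanks */
                    colon[strcspn(colon, "\n")] = '\0'; /* drop the newline    */
                    printf("%s = %s\n", line + 2, colon);
                }
            }
        }
        if (fd != stdin) {
            fclose(fd);
        }
        return 0;
    }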
--# -- --# Make this a class to avoid recompiling it for each script we scan. --class pats: -- begin=re.compile("###\s+BEGIN\s+INIT\s+INFO") -- end=re.compile("###\s+END\s+INIT\s+INFO") -- desc=re.compile("# Description:\s*(.*)", re.IGNORECASE) -- desc_continue=re.compile("#( +|\t)\s*(.*)") -- keyword=re.compile("# ([^\s:]+):\s*(.*)\s*\Z") -- --def script_keyword_scan(filename, servicename): -- keywords = {} -- ST_START=0 -- ST_INITINFO=1 -- ST_DESCRIPTION=1 -- description="" -- state=ST_START -- -- try: -- fd = open(filename) -- except IOError: -- return keywords -- -- while 1: -- line = fd.readline() -- if not line: -- break -- -- if state == ST_START: -- if pats.begin.match(line): -- state = ST_INITINFO -- continue -- if pats.end.match(line): -- break -- -- if state == ST_DESCRIPTION: -- match = pats.desc_continue.match(line) -- if match: -- description += ("\n" + match.group(2)) -- continue -- state = ST_INITINFO -- -- match = pats.desc.match(line) -- if match: -- state = ST_DESCRIPTION -- description = match.group(1) -- continue -- -- match = pats.keyword.match(line) -- if match: -- keywords[match.group(1)] = match.group(2) -- -- # Clean up and return -- fd.close() -- if description != "": -- keywords["Description"] = description -- keywords["_PATHNAME_"] = filename -- keywords["_RESOURCENAME_"] = "R_" + sysname + "_" + servicename -- return keywords -- --def script_dependency_scan(service, script, servicemap): -- keywords=script_keyword_scan(script, service) -- ServiceKeywords[service] = keywords -- --SysServiceGuesses = { -- '$local_fs': ['boot.localfs'], -- '$network': ['network'], -- '$named': ['named'], -- '$portmap': ['portmap'], -- '$remote_fs': ['nfs'], -- '$syslog': ['syslog'], -- '$netdaemons': ['portmap', 'inetd'], -- '$time': ['ntp'], --} -- --# --# For specific versions of Linux, there are often better ways --# to do this... --# --# (e.g., for SUSE Linux, one should look at /etc/insserv.conf file) --# --def map_sys_services(servicemap): -- sysservicemap = {} -- for sysserv in SysServiceGuesses.keys(): -- servlist = SysServiceGuesses[sysserv] -- result = [] -- for service in servlist: -- if servicemap.has_key(service): -- result.append(service) -- -- sysservicemap[sysserv] = result -- return sysservicemap -- --# --# --# --def create_service_dependencies(servicekeywords, systemservicemap): -- dependencies = {} -- for service in servicekeywords.keys(): -- if not dependencies.has_key(service): -- dependencies[service] = {} -- for key in ('Required-Start', 'Should-Start'): -- if not servicekeywords[service].has_key(key): -- continue -- for depserv in servicekeywords[service][key].split(): -- if systemservicemap.has_key(depserv): -- sysserv = systemservicemap[depserv] -- for serv in sysserv: -- dependencies[service][serv] = None -- else: -- if servicekeywords.has_key(depserv): -- dependencies[service][depserv] = None -- if len(dependencies[service]) == 0: -- del dependencies[service] -- return dependencies -- --# --# Modify the service name map to include all the mappings from --# 'Provides' services to real service script names... 
--# --def map_script_services(sysservmap, servicekeywords): -- for service in servicekeywords.keys(): -- if not servicekeywords[service].has_key('Provides'): -- continue -- for provided in servicekeywords[service]['Provides'].split(): -- if not sysservmap.has_key(provided): -- sysservmap[provided] = [] -- sysservmap[provided].append(service) -- return sysservmap -- --def create_cib_update(keywords, depmap): -- services = keywords.keys() -- services.sort() -- result = "" -- # Create the XML for the resources -- result += '\n' -- result += '\n' -- result += '\n' -- result += '\n' -- result += '\n' -- groupname="G_" + sysname + "_localinit" -- result += ' \n' -- for service in services: -- rid = keywords[service]["_RESOURCENAME_"] -- monid = "OPmon_" + sysname + '_' + service -- result += \ -- ' \n' + \ -- ' \n' + \ -- ' \n' -- if not NoMonitor.has_key(service): -- result += \ -- ' \n' -- result += \ -- ' \n' \ -- ' \n' -- result += ' \n' -- result += '\n' -- services = depmap.keys() -- services.sort() -- result += '\n' -- for service in services: -- rid = keywords[service]["_RESOURCENAME_"] -- deps = depmap[service].keys() -- deps.sort() -- for dep in deps: -- if not keywords.has_key(dep): -- continue -- depid = keywords[dep]["_RESOURCENAME_"] -- orderid='O_' + sysname + '_' + service + '_' + dep -- result += ' \n' -- loc_id="Loc_" + sysname + "_localinit" -- rule_id="LocRule_" + sysname + "_localinit" -- expr_id="LocExp_" + sysname + "_localinit" -- -- result += ' \n' -- result += ' \n' -- result += ' \n' -- result += ' \n' -- result += ' \n' -- result += '\n' -- result += '\n' -- result += '\n' -- result += '\n' -- return result -- -- -- --def remove_a_prereq(service, servicemap, keywords, deps): -- if deps.has_key(service): -- parents = deps[service].keys() -- del deps[service] -- else: -- parents = [] -- if servicemap.has_key(service): -- del servicemap[service] -- if keywords.has_key(service): -- del keywords[service] -- for parent in parents: -- if not deps.has_key(parent): -- continue -- remove_a_prereq(parent, servicemap, keywords, deps) -- -- --def remove_important_prereqs(prereqs, servicemap, keywords, deps): -- # Find everything these important prereqs need and get rid of them... -- for service in prereqs: -- remove_a_prereq(service, servicemap, keywords, deps) -- --ServiceList = find_ordered_services(os.path.join(InitDir, "rc3.d")) --register_services(InitDir, ServiceList) --SysServiceMap = map_sys_services(ServiceMap) --map_script_services(SysServiceMap, ServiceKeywords) --ServiceDependencies = create_service_dependencies(ServiceKeywords,SysServiceMap) --remove_important_prereqs(PreReqs, SysServiceMap, ServiceKeywords, ServiceDependencies) -- --print create_cib_update(ServiceKeywords, ServiceDependencies) -diff --git a/tools/crm_commands.py.in b/tools/crm_commands.py.in -deleted file mode 100644 -index c48d82c..0000000 ---- a/tools/crm_commands.py.in -+++ /dev/null -@@ -1,132 +0,0 @@ --# --# --# pingd OCF Resource Agent --# Records (in the CIB) the current number of ping nodes a --# cluster node can connect to. --# --# Copyright (c) 2006 Andrew Beekhof --# All Rights Reserved. --# --# This program is free software; you can redistribute it and/or modify --# it under the terms of version 2 of the GNU General Public License as --# published by the Free Software Foundation. --# --# This program is distributed in the hope that it would be useful, but --# WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
--# --# Further, this software is distributed without any warranty that it is --# free of the rightful claim of any third person regarding infringement --# or the like. Any license provided herein, whether implied or --# otherwise, applies only to this software file. Patent licenses, if --# any, provided herein do not apply to combinations of this program with --# other software, or any other product whatsoever. --# --# You should have received a copy of the GNU General Public License --# along with this program; if not, write the Free Software Foundation, --# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. --# --####################################################################### -- --import crm_utils as utl -- --class HelpRequest(Exception): -- """Exception raised when a help listing is required.""" -- --class ReparseRequest(Exception): -- """Exception raised when a command changed the command-line.""" -- --def up(*args, **cmdoptions): -- l = len(utl.topic_stack) -- if l > 1: -- utl.topic_stack.pop() -- utl.set_topic(utl.topic_stack[-1]) -- else: -- utl.log_debug("Already at the top of the stack") -- --def toggle_flag(*args, **cmdoptions): -- flag = cmdoptions["flag"] -- if utl.global_opts[flag]: -- utl.global_opts[flag] = 0 -- else: -- utl.global_opts[flag] = 1 -- -- return utl.global_opts[flag] -- --def cd_(*args, **cmdoptions): -- utl.log_dev("args: %s\nopts: %s" % (repr(args), repr(cmdoptions))) -- if not cmdoptions["topic"]: -- utl.log_err("No topic specified") -- return 1 -- -- if cmdoptions["topic"]: -- utl.set_topic(cmdoptions["topic"]) -- if args: -- raise ReparseRequest() -- if utl.crm_topic not in utl.topic_stack: -- utl.topic_stack.append(cmdoptions["topic"]) -- if not utl.global_opts["interactive"]: -- help(cmdoptions["topic"]) -- return 0 -- --def exit(*args, **cmdoptions): -- sys.exit(0) -- --def help(*args, **cmdoptions): -- if args: -- raise HelpRequest(args[0]) -- raise HelpRequest(utl.crm_topic) -- --def debugstate(*args, **cmdoptions): -- utl.log_info("Global Options: ") -- for opt in utl.global_opts.keys(): -- utl.log_info(" * %s:\t%s" % (opt, utl.global_opts[opt])) -- utl.log_info("Stack: "+repr(utl.topic_stack)) -- utl.log_info("Stack Head: "+utl.crm_topic) -- return 0 -- --def do_list(*args, **cmdoptions): -- topic = utl.crm_topic -- if cmdoptions.has_key("topic") and cmdoptions["topic"]: -- topic = cmdoptions["topic"] -- -- utl.log_debug("Complete '%s' listing" % topic) -- if topic == "resources": -- utl.os_system("crm_resource -l", True) -- elif topic == "nodes": -- lines = utl.os_system("cibadmin -Q -o nodes", False) -- for line in lines: -- if line.find("node ") >= 0: -- print line.rstrip() -- else: -- utl.log_err("%s: Topic %s is not (yet) supported" % ("list", topic)) -- return 1 -- return 0 -- --def do_status(*args, **cmdoptions): -- topic = utl.crm_topic -- if cmdoptions.has_key("topic") and cmdoptions["topic"]: -- topic = cmdoptions["topic"] -- -- if topic == "resources": -- if not args: -- utl.os_system("crm_resource -L", True) -- for rsc in args: -- utl.os_system("crm_resource -W -r %s"%rsc, True) -- -- elif topic == "nodes": -- lines = utl.os_system("cibadmin -Q -o status", False) -- for line in lines: -- line = line.rstrip() -- utl.log_dev("status line: "+line) -- if line.find("node_state ") >= 0: -- if not args: -- print line -- for node in args: -- if line.find(node) >= 0: -- print line -- else: -- utl.log_err("Topic %s is not (yet) supported" % topic) -- return 1 -- -- return 0 -diff --git a/tools/crm_mon.c b/tools/crm_mon.c -index 
0b71275..46a59d6 100644 ---- a/tools/crm_mon.c -+++ b/tools/crm_mon.c -@@ -2715,6 +2715,7 @@ print_status(pe_working_set_t * data_set) - } else { - online_nodes = add_list_element(online_nodes, node_name); - } -+ free(node_name); - continue; - } - } else { -@@ -2727,6 +2728,7 @@ print_status(pe_working_set_t * data_set) - } else { - offline_nodes = add_list_element(offline_nodes, node_name); - } -+ free(node_name); - continue; - } - } -@@ -3078,6 +3080,7 @@ print_html_status(pe_working_set_t * data_set, const char *filename) - fprintf(stream, "\n"); - } - fprintf(stream, "\n"); -+ free(node_name); - } - fprintf(stream, "\n"); - -diff --git a/tools/crm_node.c b/tools/crm_node.c -index c484e17..d0195e3 100644 ---- a/tools/crm_node.c -+++ b/tools/crm_node.c -@@ -470,6 +470,7 @@ try_cman(int command, enum cluster_type_e stack) - - case 'l': - case 'p': -+ memset(cman_nodes, 0, MAX_NODES * sizeof(cman_node_t)); - rc = cman_get_nodes(cman_handle, MAX_NODES, &node_count, cman_nodes); - if (rc != 0) { - fprintf(stderr, "Couldn't query cman node list: %d %d", rc, errno); -@@ -489,6 +490,7 @@ try_cman(int command, enum cluster_type_e stack) - break; - - case 'i': -+ memset(&node, 0, sizeof(cman_node_t)); - rc = cman_get_node(cman_handle, CMAN_NODEID_US, &node); - if (rc != 0) { - fprintf(stderr, "Couldn't query cman node id: %d %d", rc, errno); -diff --git a/tools/crm_primitive.py.in b/tools/crm_primitive.py.in -deleted file mode 100644 -index cfe0b5c..0000000 ---- a/tools/crm_primitive.py.in -+++ /dev/null -@@ -1,268 +0,0 @@ --#!@PYTHON@ -- --'''Create an XML fragment describing a new resource --''' -- --__copyright__=''' --Author: Andrew Beekhof --Copyright (C) 2005 Andrew Beekhof --''' -- --# --# This program is free software; you can redistribute it and/or --# modify it under the terms of the GNU General Public License --# as published by the Free Software Foundation; either version 2 --# of the License, or (at your option) any later version. --# --# This program is distributed in the hope that it will be useful, --# but WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --# GNU General Public License for more details. --# --# You should have received a copy of the GNU General Public License --# along with this program; if not, write to the Free Software --# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
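Two small robustness fixes sit in the crm_mon.c and crm_node.c hunks above: node_name is heap-allocated once per loop iteration, and the early "continue" paths previously returned to the loop without freeing it, leaking one string per node per status refresh; crm_node's cman buffers are now zeroed before libcman fills them, so a short or failed read cannot leave uninitialized bytes to be parsed later. A minimal generic-C illustration of the leak-on-continue pattern and its fix (get_node_name is a hypothetical stand-in, not a pacemaker API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical stand-in for the allocating name lookup in crm_mon.c. */
    static char *get_node_name(int id)
    {
        char buf[32];

        snprintf(buf, sizeof(buf), (id % 2) ? "node%d:remote" : "node%d", id);
        return strdup(buf);
    }

    int main(void)
    {
        for (int id = 0; id < 4; id++) {
            char *node_name = get_node_name(id);

            if (node_name == NULL) {
                continue;            /* nothing allocated, safe to skip */
            }
            if (strchr(node_name, ':') != NULL) {
                free(node_name);     /* the fix: release before 'continue' */
                continue;
            }
            printf("online: %s\n", node_name);
            free(node_name);         /* end-of-iteration release */
        }
        return 0;
    }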
-- --import sys,string,os --import xml.dom.minidom -- --print_rsc_only = 0 --rsc_name = None --rsc_class = None --rsc_type = None --rsc_provider = None --start_timeout = None --stop_timeout = None --monitor_interval = None --monitor_timeout = None --rsc_options = [] --rsc_location = [] --rsc_colocation = [] -- --def create_cib() : -- doc = xml.dom.minidom.Document() -- cib = doc.createElement("cib") -- doc.appendChild(cib) -- -- configuration = doc.createElement("configuration") -- cib.appendChild(configuration) -- -- #crm_config = doc.createElement("crm_config") -- #configuration.appendChild(crm_config) -- -- resources = doc.createElement("resources") -- configuration.appendChild(resources) -- constraints = doc.createElement("constraints") -- configuration.appendChild(constraints) -- -- return doc, resources, constraints -- --def cib_resource(doc, id, ra_class, type, provider): -- -- params = None -- -- resource = doc.createElement("primitive") -- -- resource.setAttribute("id", id) -- resource.setAttribute("type", type) -- resource.setAttribute("class", ra_class) -- -- if ra_class == "ocf": -- if not provider: -- provider = "heartbeat" -- resource.setAttribute("provider", provider) -- -- elif ra_class != "lsb" and ra_class != "heartbeat": -- print "Unknown resource class: "+ ra_class -- return None -- -- operations = doc.createElement("operations") -- resource.appendChild(operations) -- -- if monitor_interval != None: -- op = doc.createElement("op") -- operations.appendChild(op) -- op.setAttribute("id", id + "_mon_" + monitor_interval) -- op.setAttribute("name", "monitor") -- op.setAttribute("interval", monitor_interval) -- if monitor_timeout != None: -- op.setAttribute("timeout", monitor_timeout) -- -- if start_timeout != None: -- op = doc.createElement("op") -- operations.appendChild(op) -- op.setAttribute("id", id + "_start") -- op.setAttribute("name", "start") -- op.setAttribute("timeout", start_timeout) -- -- if stop_timeout != None: -- op = doc.createElement("op") -- operations.appendChild(op) -- op.setAttribute("id", id + "_stop") -- op.setAttribute("name", "stop") -- op.setAttribute("timeout", stop_timeout) -- -- instance_attributes = doc.createElement("instance_attributes") -- instance_attributes.setAttribute("id", id) -- resource.appendChild(instance_attributes) -- attributes = doc.createElement("attributes") -- instance_attributes.appendChild(attributes) -- for i in range(0,len(rsc_options)) : -- if rsc_options[i] == None : -- continue -- -- param = string.split(rsc_options[i], "=") -- nvpair = doc.createElement("nvpair") -- nvpair.setAttribute("id", id + "_" + param[0]) -- nvpair.setAttribute("name", param[0]) -- nvpair.setAttribute("value", param[1]) -- attributes.appendChild(nvpair) -- -- return resource -- --def cib_rsc_location(doc, id, node, score): -- rule = doc.createElement("rule") -- rule.setAttribute("id", id+"_prefer_"+node+"_rule") -- rule.setAttribute("score", score) -- expression = doc.createElement("expression") -- expression.setAttribute("id",id+"_prefer_"+node+"_expr") -- expression.setAttribute("attribute","#uname") -- expression.setAttribute("operation","eq") -- expression.setAttribute("value", node) -- rule.appendChild(expression) -- return rule -- --def cib_rsc_colocation(doc, id, other_resource, score): -- rsc_colocation = doc.createElement("rsc_colocation") -- rsc_colocation.setAttribute("id", id+"_colocate_with_"+other_resource) -- rsc_colocation.setAttribute("from", id) -- rsc_colocation.setAttribute("to", other_resource) -- 
rsc_colocation.setAttribute("score", score) -- return rsc_colocation -- --def print_usage(): -- print "usage: " \ -- + sys.argv[0] \ -- + " --name "\ -- + " --class "\ -- + " --type "\ -- + " [--provider ]"\ -- + "\n\t"\ -- + " [--start-timeout ]"\ -- + " [--stop-timeout ]"\ -- + " [--monitor ]"\ -- + " [--monitor-timeout ]"\ -- + "\n\t"\ -- + " [--rsc-option name=value]*"\ -- + " [--rsc-location uname=score]*"\ -- + " [--rsc-colocation resource=score]*" -- print "Example:\n\t" + sys.argv[0] \ -- + " --name cluster_ip_1 --type IPaddr --provider heartbeat --class ocf "\ -- + "--rsc-option ip=192.168.1.101 --rsc-location node1=500 | cibadmin -C -p" -- sys.exit(1) -- --if __name__=="__main__" : -- -- # Process arguments... -- skipthis = None -- args = sys.argv[1:] -- if len(args) == 0: -- print_usage() -- -- for i in range(0, len(args)) : -- if skipthis : -- skipthis = None -- continue -- -- elif args[i] == "--name" : -- skipthis = True -- rsc_name = args[i+1] -- -- elif args[i] == "--class" : -- skipthis = True -- rsc_class = args[i+1] -- -- elif args[i] == "--type" : -- skipthis = True -- rsc_type = args[i+1] -- -- elif args[i] == "--provider" : -- skipthis = True -- rsc_provider = args[i+1] -- -- elif args[i] == "--start-timeout" : -- skipthis = True -- start_timeout = args[i+1] -- -- elif args[i] == "--stop-timeout" : -- skipthis = True -- stop_timeout = args[i+1] -- -- elif args[i] == "--monitor" : -- skipthis = True -- monitor_interval = args[i+1] -- -- elif args[i] == "--monitor-timeout" : -- skipthis = True -- monitor_timeout = args[i+1] -- -- elif args[i] == "--rsc-option" : -- skipthis = True -- params = string.split(args[i+1], "=") -- if params[1] != None: -- rsc_options.append(args[i+1]) -- else: -- print "option '"+args[i+1]+"' must be of the form name=value" -- -- elif args[i] == "--rsc-location" : -- skipthis = True -- params = string.split(args[i+1], "=") -- if params[1] != None: -- rsc_location.append(args[i+1]) -- else: -- print "option '"+args[i+1]+"' must be of the form host=score" -- -- elif args[i] == "--rsc-colocation" : -- skipthis = True -- params = string.split(args[i+1], "=") -- if params[1] != None: -- rsc_colocation.append(args[i+1]) -- else: -- print "option '"+args[i+1]+"' must be of the form resource=score" -- -- elif args[i] == "--rsc-only" : -- print_rsc_only = 1 -- else: -- print "Unknown argument: "+ args[i] -- print_usage() -- -- cib = create_cib() -- pre_line = "" -- id_index = 1 -- resource = cib_resource(cib[0], rsc_name, rsc_class, rsc_type, rsc_provider) -- -- if print_rsc_only: -- print resource.toprettyxml() -- sys.exit(0) -- -- cib[1].appendChild(resource) -- -- if rsc_location != None : -- rsc_loc = cib[0].createElement("rsc_location") -- rsc_loc.setAttribute("id", rsc_name+"_preferences") -- rsc_loc.setAttribute("rsc", rsc_name) -- for i in range(0, len(rsc_location)) : -- param = string.split(rsc_location[i], "=") -- location_rule = cib_rsc_location(cib[0], rsc_name, param[0], param[1]) -- rsc_loc.appendChild(location_rule) -- cib[2].appendChild(rsc_loc) -- -- for i in range(0, len(rsc_colocation)) : -- if rsc_location[i] == None : -- continue -- -- param = string.split(rsc_colocation[i], "=") -- colocation_rule = cib_rsc_colocation(cib[0], rsc_name, param[0], param[1]) -- cib[2].appendChild(colocation_rule) -- -- print cib[0].toprettyxml() -diff --git a/tools/crm_resource.c b/tools/crm_resource.c -index 31136ef..2fce3b7 100644 ---- a/tools/crm_resource.c -+++ b/tools/crm_resource.c -@@ -853,6 +853,7 @@ main(int argc, char **argv) - rc = -ENXIO; 
- goto bail; - } -+ - rc = cli_resource_print_attribute(rsc_id, prop_name, &data_set); - - } else if (rsc_cmd == 'p') { -@@ -883,6 +884,10 @@ main(int argc, char **argv) - } else if (rsc_cmd == 'C' && rsc_id) { - resource_t *rsc = pe_find_resource(data_set.resources, rsc_id); - -+ if(do_force == FALSE) { -+ rsc = uber_parent(rsc); -+ } -+ - crm_debug("Re-checking the state of %s on %s", rsc_id, host_uname); - if(rsc) { - crmd_replies_needed = 0; -@@ -891,6 +896,11 @@ main(int argc, char **argv) - rc = -ENODEV; - } - -+ if(rc == pcmk_ok && BE_QUIET == FALSE) { -+ /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */ -+ cli_resource_check(cib_conn, rsc); -+ } -+ - if (rc == pcmk_ok) { - start_mainloop(); - } -diff --git a/tools/crm_resource.h b/tools/crm_resource.h -index 49b6138..5a206e0 100644 ---- a/tools/crm_resource.h -+++ b/tools/crm_resource.h -@@ -68,6 +68,7 @@ int cli_resource_print_property(const char *rsc, const char *attr, pe_working_se - int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bool active, pe_working_set_t * data_set); - - /* runtime */ -+void cli_resource_check(cib_t * cib, resource_t *rsc); - int cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, const char *rsc_id, pe_working_set_t * data_set); - int cli_resource_search(const char *rsc, pe_working_set_t * data_set); - int cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_uname, resource_t * rsc, pe_working_set_t * data_set); -diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c -index 9c3711c..946b9e3 100644 ---- a/tools/crm_resource_print.c -+++ b/tools/crm_resource_print.c -@@ -352,8 +352,11 @@ cli_resource_print_attribute(const char *rsc, const char *attr, pe_working_set_t - - if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { - get_rsc_attributes(params, the_rsc, current, data_set); -+ - } else if (safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { -+ /* No need to redirect to the parent */ - get_meta_attributes(params, the_rsc, current, data_set); -+ - } else { - unpack_instance_attributes(data_set->input, the_rsc->xml, XML_TAG_UTILIZATION, NULL, - params, NULL, FALSE, data_set->now); -diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c -index 006ec08..a270cbf 100644 ---- a/tools/crm_resource_runtime.c -+++ b/tools/crm_resource_runtime.c -@@ -198,6 +198,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - int rc = pcmk_ok; - static bool need_init = TRUE; - -+ char *lookup_id = NULL; - char *local_attr_id = NULL; - char *local_attr_set = NULL; - -@@ -212,14 +213,39 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - } - - if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) { -- rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, XML_TAG_META_SETS, attr_set, attr_id, -+ rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id, XML_TAG_META_SETS, attr_set, attr_id, - attr_name, &local_attr_id); -- if (rc == pcmk_ok) { -- printf("WARNING: There is already a meta attribute called %s (id=%s)\n", attr_name, -- local_attr_id); -+ if(rc == pcmk_ok && do_force == FALSE) { -+ if (BE_QUIET == FALSE) { -+ printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n", -+ uber_parent(rsc)->id, attr_name, local_attr_id); -+ printf(" Delete '%s' first or use --force to override\n", local_attr_id); -+ } -+ return -ENOTUNIQ; -+ } -+ -+ } else if(rsc->parent) { -+ -+ switch(rsc->parent->variant) { -+ case pe_group: -+ 
if (BE_QUIET == FALSE) { -+ printf("Updating '%s' for '%s' will not apply to its peers in '%s'\n", attr_name, rsc_id, rsc->parent->id); -+ } -+ break; -+ case pe_master: -+ case pe_clone: -+ rsc = rsc->parent; -+ if (BE_QUIET == FALSE) { -+ printf("Updating '%s' for '%s'...\n", rsc->id, rsc_id); -+ } -+ break; -+ default: -+ break; - } - } -- rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, -+ -+ lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */ -+ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, - &local_attr_id); - - if (rc == pcmk_ok) { -@@ -227,6 +253,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - attr_id = local_attr_id; - - } else if (rc != -ENXIO) { -+ free(lookup_id); - free(local_attr_id); - return rc; - -@@ -250,7 +277,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - free_xml(cib_top); - - if (attr_set == NULL) { -- local_attr_set = crm_concat(rsc_id, attr_set_type, '-'); -+ local_attr_set = crm_concat(lookup_id, attr_set_type, '-'); - attr_set = local_attr_set; - } - if (attr_id == NULL) { -@@ -263,7 +290,7 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - } - - xml_top = create_xml_node(NULL, tag); -- crm_xml_add(xml_top, XML_ATTR_ID, rsc_id); -+ crm_xml_add(xml_top, XML_ATTR_ID, lookup_id); - - xml_obj = create_xml_node(xml_top, attr_set_type); - crm_xml_add(xml_obj, XML_ATTR_ID, attr_set); -@@ -285,7 +312,15 @@ cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const ch - crm_log_xml_debug(xml_top, "Update"); - - rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options); -+ if (rc == pcmk_ok && BE_QUIET == FALSE) { -+ printf("Set '%s' option: id=%s%s%s%s%s=%s\n", lookup_id, local_attr_id, -+ attr_set ? " set=" : "", attr_set ? attr_set : "", -+ attr_name ? " name=" : "", attr_name ? 
attr_name : "", attr_value); -+ } -+ - free_xml(xml_top); -+ -+ free(lookup_id); - free(local_attr_id); - free(local_attr_set); - -@@ -330,6 +365,7 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch - xmlNode *xml_obj = NULL; - - int rc = pcmk_ok; -+ char *lookup_id = NULL; - char *local_attr_id = NULL; - resource_t *rsc = find_rsc_or_clone(rsc_id, data_set); - -@@ -337,7 +373,29 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch - return -ENXIO; - } - -- rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, -+ if(rsc->parent && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) { -+ -+ switch(rsc->parent->variant) { -+ case pe_group: -+ if (BE_QUIET == FALSE) { -+ printf("Removing '%s' for '%s' will not apply to its peers in '%s'\n", attr_name, rsc_id, rsc->parent->id); -+ } -+ break; -+ case pe_master: -+ case pe_clone: -+ rsc = rsc->parent; -+ if (BE_QUIET == FALSE) { -+ printf("Removing '%s' from '%s' for '%s'...\n", attr_name, rsc->id, rsc_id); -+ } -+ break; -+ default: -+ break; -+ } -+ -+ } -+ -+ lookup_id = clone_strip(rsc->id); -+ rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, - &local_attr_id); - - if (rc == -ENXIO) { -@@ -360,8 +418,8 @@ cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const ch - CRM_ASSERT(cib); - rc = cib->cmds->delete(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options); - -- if (rc == pcmk_ok) { -- printf("Deleted %s option: id=%s%s%s%s%s\n", rsc_id, local_attr_id, -+ if (rc == pcmk_ok && BE_QUIET == FALSE) { -+ printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id, - attr_set ? " set=" : "", attr_set ? attr_set : "", - attr_name ? " name=" : "", attr_name ? attr_name : ""); - } -@@ -493,7 +551,10 @@ cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_ - for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) { - resource_t *child = (resource_t *) lpc->data; - -- cli_resource_delete(cib_conn, crmd_channel, host_uname, child, data_set); -+ rc = cli_resource_delete(cib_conn, crmd_channel, host_uname, child, data_set); -+ if(rc != pcmk_ok || is_not_set(rsc->flags, pe_rsc_unique)) { -+ return rc; -+ } - } - return pcmk_ok; - -@@ -514,31 +575,78 @@ cli_resource_delete(cib_t *cib_conn, crm_ipc_t * crmd_channel, const char *host_ - node = pe_find_node(data_set->nodes, host_uname); - - if (node && node->details->rsc_discovery_enabled) { -- printf("Cleaning up %s on %s\n", rsc->id, host_uname); -+ printf("Cleaning up %s on %s", rsc->id, host_uname); - rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc->id, TRUE, data_set); - } else { - printf("Resource discovery disabled on %s. 
Unable to delete lrm state.\n", host_uname); -+ rc = -EOPNOTSUPP; - } - - if (rc == pcmk_ok) { - char *attr_name = NULL; -- const char *id = rsc->id; - - if(node && node->details->remote_rsc == NULL && node->details->rsc_discovery_enabled) { - crmd_replies_needed++; - } -- if (rsc->clone_name) { -- id = rsc->clone_name; -+ -+ if(is_not_set(rsc->flags, pe_rsc_unique)) { -+ char *id = clone_strip(rsc->id); -+ attr_name = crm_strdup_printf("fail-count-%s", id); -+ free(id); -+ -+ } else if (rsc->clone_name) { -+ attr_name = crm_strdup_printf("fail-count-%s", rsc->clone_name); -+ -+ } else { -+ attr_name = crm_strdup_printf("fail-count-%s", rsc->id); - } - -- attr_name = crm_concat("fail-count", id, '-'); -+ printf(", removing %s\n", attr_name); - rc = attrd_update_delegate(NULL, 'D', host_uname, attr_name, NULL, XML_CIB_TAG_STATUS, NULL, - NULL, NULL, node ? is_remote_node(node) : FALSE); - free(attr_name); -+ -+ } else if(rc != -EOPNOTSUPP) { -+ printf(" - FAILED\n"); - } -+ - return rc; - } - -+void -+cli_resource_check(cib_t * cib_conn, resource_t *rsc) -+{ -+ -+ char *role_s = NULL; -+ char *managed = NULL; -+ resource_t *parent = uber_parent(rsc); -+ -+ find_resource_attr(cib_conn, XML_ATTR_ID, parent->id, -+ XML_TAG_META_SETS, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed); -+ -+ find_resource_attr(cib_conn, XML_ATTR_ID, parent->id, -+ XML_TAG_META_SETS, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s); -+ -+ if(managed == NULL) { -+ managed = strdup("1"); -+ } -+ if(crm_is_true(managed) == FALSE) { -+ printf("\n\t*Resource %s is configured to not be managed by the cluster\n", parent->id); -+ } -+ if(role_s) { -+ enum rsc_role_e role = text2role(role_s); -+ if(role == RSC_ROLE_UNKNOWN) { -+ // Treated as if unset -+ -+ } else if(role == RSC_ROLE_STOPPED) { -+ printf("\n\t* The configuration specifies that '%s' should remain stopped\n", parent->id); -+ -+ } else if(parent->variant > pe_clone && role != RSC_ROLE_MASTER) { -+ printf("\n\t* The configuration specifies that '%s' should not be promoted\n", parent->id); -+ } -+ } -+} -+ - int - cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname, - const char *rsc_id, pe_working_set_t * data_set) -diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c -index 0051112..7d0a8eb 100644 ---- a/tools/crm_simulate.c -+++ b/tools/crm_simulate.c -@@ -59,8 +59,11 @@ char *use_date = NULL; - static void - get_date(pe_working_set_t * data_set) - { -+ int value = 0; - time_t original_date = 0; -- crm_element_value_int(data_set->input, "execution-date", (int*)&original_date); -+ -+ crm_element_value_int(data_set->input, "execution-date", &value); -+ original_date = value; - - if (use_date) { - data_set->now = crm_time_new(use_date); -diff --git a/tools/crm_utils.py.in b/tools/crm_utils.py.in -deleted file mode 100644 -index 67d6918..0000000 ---- a/tools/crm_utils.py.in -+++ /dev/null -@@ -1,188 +0,0 @@ --#!/bin/env python --# --# --# pingd OCF Resource Agent --# Records (in the CIB) the current number of ping nodes a --# cluster node can connect to. --# --# Copyright (c) 2006 Andrew Beekhof --# All Rights Reserved. --# --# This program is free software; you can redistribute it and/or modify --# it under the terms of version 2 of the GNU General Public License as --# published by the Free Software Foundation. --# --# This program is distributed in the hope that it would be useful, but --# WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
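The crm_simulate.c hunk above is a pointer-width fix worth spelling out: crm_element_value_int() stores through an int pointer, but the old code passed (int*)&original_date with original_date declared time_t, which is 64-bit on typical modern ABIs, so only half of the variable was written (and which half depends on endianness). Reading into a genuine int and then widening is the portable pattern. A standalone sketch of that pattern (parse_int is a hypothetical stand-in for crm_element_value_int):

    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in: like crm_element_value_int(), writes an int. */
    static void parse_int(const char *s, int *out)
    {
        if (sscanf(s, "%d", out) != 1) {
            *out = 0;
        }
    }

    int main(void)
    {
        time_t original_date = 0;   /* 64-bit on most current platforms */
        int value = 0;

        /* Broken: parse_int("...", (int *)&original_date) writes only
         * sizeof(int) bytes of a sizeof(time_t) object.
         *
         * Fixed: read into an int of the correct width, then widen. */
        parse_int("1439856000", &value);
        original_date = value;

        printf("%s", ctime(&original_date));
        return 0;
    }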
--# --# Further, this software is distributed without any warranty that it is --# free of the rightful claim of any third person regarding infringement --# or the like. Any license provided herein, whether implied or --# otherwise, applies only to this software file. Patent licenses, if --# any, provided herein do not apply to combinations of this program with --# other software, or any other product whatsoever. --# --# You should have received a copy of the GNU General Public License --# along with this program; if not, write the Free Software Foundation, --# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. --# --####################################################################### -- --import os --import sys --import getopt --import readline --import traceback --from popen2 import Popen3 -- --crm_topic = "crm" --topic_stack = [ crm_topic ] --hist_file = os.environ.get('HOME')+"/.crm_history" --global_opts = {} -- --def exit_(code=0): -- if global_opts["interactive"]: -- log_info("Exiting... ") -- try: -- readline.write_history_file(hist_file) -- log_debug("Wrote history to: "+hist_file) -- except: -- log_debug("Couldnt write history to: "+hist_file) -- sys.exit(code) -- --def log_debug(log): -- if global_opts.has_key("debug") and global_opts["debug"]: -- print log -- --def log_dev(log): -- if global_opts.has_key("devlog") and global_opts["devlog"]: -- print log -- --def log_info(log): -- print log -- --def log_err(log): -- print "ERROR: "+log -- --def set_topic(name): -- global crm_topic -- if crm_topic != name: -- log_dev("topic: %s->%s" % (crm_topic, name)) -- crm_topic = name -- --def os_system(cmd, print_raw=False): -- log_debug("Performing command: "+cmd) -- p = Popen3(cmd, None) -- p.tochild.close() -- result = p.fromchild.readlines() -- p.fromchild.close() -- p.wait() -- if print_raw: -- for line in result: -- print line.rstrip() -- return result -- --# --# Creates an argv-style array (that preserves quoting) for use in shell-mode --# --def create_argv(text): -- args = [] -- word = [] -- index = 0 -- total = len(text) -- -- in_word = False -- in_verbatum = False -- -- while index < total: -- finish_word = False -- append_word = False -- #log_debug("processing: "+text[index]) -- if text[index] == '\\': -- index = index +1 -- append_word = True -- -- elif text[index].isspace(): -- if in_verbatum or in_word: -- append_word = True -- else: -- finish_word = True -- -- elif text[index] == '"': -- if in_verbatum: -- append_word = True -- else: -- finish_word = True -- if in_word: -- in_word = False -- else: -- in_word = True -- -- elif text[index] == '\'': -- finish_word = True -- if in_verbatum: -- in_verbatum = False -- else: -- in_verbatum = True -- else: -- append_word = True -- -- if finish_word: -- if word: -- args.append(''.join(word)) -- word = [] -- elif append_word: -- word.append(text[index]) -- #log_debug("Added %s to word: %s" % (text[index], str(word))) -- -- index = index +1 -- -- if in_verbatum or in_word: -- text="" -- if word: -- text=" after: '%s'"%''.join(word) -- raise QuotingError("Un-matched quoting%s"%text, args) -- -- elif word: -- args.append(''.join(word)) -- -- return args -- --def init_readline(func): -- readline.set_completer(func) -- readline.parse_and_bind("tab: complete") -- readline.set_history_length(100) -- -- try: -- readline.read_history_file(hist_file) -- except: -- pass -- --def fancyopts(args, options, state): -- long = [] -- short = '' -- map = {} -- dt = {} -- -- for s, l, d, c in options: -- pl = l.replace('-', '_') -- map['-'+s] 
= map['--'+l] = pl -- state[pl] = d -- dt[pl] = type(d) -- if not d is None and not callable(d): -- if s: s += ':' -- if l: l += '=' -- if s: short = short + s -- if l: long.append(l) -- -- opts, args = getopt.getopt(args, short, long) -- -- for opt, arg in opts: -- if dt[map[opt]] is type(fancyopts): state[map[opt]](state,map[opt],arg) -- elif dt[map[opt]] is type(1): state[map[opt]] = int(arg) -- elif dt[map[opt]] is type(''): state[map[opt]] = arg -- elif dt[map[opt]] is type([]): state[map[opt]].append(arg) -- elif dt[map[opt]] is type(None): state[map[opt]] = 1 -- -- return args -diff --git a/tools/regression.acls.exp b/tools/regression.acls.exp -index ae6735a..ac7ae0c 100644 ---- a/tools/regression.acls.exp -+++ b/tools/regression.acls.exp -@@ -253,10 +253,10 @@ Error performing operation: Permission denied - =#=#=#= End test: unknownguy: Set stonith-enabled - Permission denied (13) =#=#=#= - * Passed: crm_attribute - unknownguy: Set stonith-enabled - =#=#=#= Begin test: unknownguy: Create a resource =#=#=#= --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs - Call failed: Permission denied - =#=#=#= End test: unknownguy: Create a resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - unknownguy: Create a resource -@@ -273,8 +273,8 @@ Error performing operation: Permission denied - =#=#=#= End test: l33t-haxor: Set stonith-enabled - Permission denied (13) =#=#=#= - * Passed: crm_attribute - l33t-haxor: Set stonith-enabled - =#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#= --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent --__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent -+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] - Call failed: Permission denied - =#=#=#= End test: l33t-haxor: Create a resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - l33t-haxor: Create a resource -@@ -323,13 +323,13 @@ Call failed: Permission denied - =#=#=#= End test: niceguy: Query configuration - OK (0) =#=#=#= - * Passed: cibadmin - niceguy: Query configuration - =#=#=#= Begin test: niceguy: Set enable-acl =#=#=#= --__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default -+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default - Error performing operation: Permission denied - Error setting enable-acl=false (section=crm_config, set=): Permission denied - =#=#=#= End test: niceguy: Set enable-acl 
- Permission denied (13) =#=#=#= - * Passed: crm_attribute - niceguy: Set enable-acl - =#=#=#= Begin test: niceguy: Set stonith-enabled =#=#=#= --__xml_acl_post_process: Creation of nvpair=cib-bootstrap-options-stonith-enabled is allowed -+__xml_acl_post_process: Creation of nvpair=cib-bootstrap-options-stonith-enabled is allowed - =#=#=#= Current cib after: niceguy: Set stonith-enabled =#=#=#= - - -@@ -376,8 +376,8 @@ __xml_acl_post_process: Creation of nvpair=cib-bootstrap-options-stonith-enable - =#=#=#= End test: niceguy: Set stonith-enabled - OK (0) =#=#=#= - * Passed: crm_attribute - niceguy: Set stonith-enabled - =#=#=#= Begin test: niceguy: Create a resource =#=#=#= --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default --__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default -+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] - Call failed: Permission denied - =#=#=#= End test: niceguy: Create a resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Create a resource -@@ -533,10 +533,11 @@ Error performing operation: Permission denied - =#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Permission denied (13) =#=#=#= - * Passed: crm_resource - l33t-haxor: Remove a resource meta attribute - =#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity --__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Stopped - =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#= - - -@@ -589,9 +590,9 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is - =#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - niceguy: Create a resource meta attribute - =#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity - Stopped - =#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#= - -@@ 
-645,10 +646,10 @@ Stopped - =#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - niceguy: Query a resource meta attribute - =#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity --Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role - =#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#= - - -@@ -699,10 +700,11 @@ Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role - =#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - niceguy: Remove a resource meta attribute - =#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity --__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Started - =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#= - - -@@ -804,8 +806,8 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is - - - =#=#=#= Begin test: niceguy: Replace - remove acls =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/acls: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/acls: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - remove acls - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - remove acls -@@ -859,9 +861,9 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - create resource =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default --__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2'] -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default 
-+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2'] - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - create resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - create resource -@@ -914,8 +916,8 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - modify attribute (deny) =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - modify attribute (deny) - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - modify attribute (deny) -@@ -968,8 +970,8 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - delete attribute (deny) =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - delete attribute (deny) - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - delete attribute (deny) -@@ -1022,8 +1024,8 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - create attribute (deny) =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - create attribute (deny) - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - create attribute (deny) -@@ -1180,28 +1182,28 @@ Call failed: Permission denied - - !#!#!#!#! Upgrading to pacemaker-2.0 and retesting !#!#!#!#! 
- =#=#=#= Begin test: root: Upgrade to pacemaker-2.0 =#=#=#= --__xml_acl_post_process: Creation of acl_permission=observer-read-1 is allowed --__xml_acl_post_process: Creation of acl_permission=observer-write-1 is allowed --__xml_acl_post_process: Creation of acl_permission=observer-write-2 is allowed --__xml_acl_post_process: Creation of acl_permission=admin-read-1 is allowed --__xml_acl_post_process: Creation of acl_permission=admin-write-1 is allowed --__xml_acl_post_process: Creation of acl_target=l33t-haxor is allowed --__xml_acl_post_process: Creation of role=auto-l33t-haxor is allowed --__xml_acl_post_process: Creation of acl_role=auto-l33t-haxor is allowed --__xml_acl_post_process: Creation of acl_permission=crook-nothing is allowed --__xml_acl_post_process: Creation of acl_target=niceguy is allowed --__xml_acl_post_process: Creation of role=observer is allowed --__xml_acl_post_process: Creation of acl_target=bob is allowed --__xml_acl_post_process: Creation of role=admin is allowed --__xml_acl_post_process: Creation of acl_target=badidea is allowed --__xml_acl_post_process: Creation of role=auto-badidea is allowed --__xml_acl_post_process: Creation of acl_role=auto-badidea is allowed --__xml_acl_post_process: Creation of acl_permission=badidea-resources is allowed --__xml_acl_post_process: Creation of acl_target=betteridea is allowed --__xml_acl_post_process: Creation of role=auto-betteridea is allowed --__xml_acl_post_process: Creation of acl_role=auto-betteridea is allowed --__xml_acl_post_process: Creation of acl_permission=betteridea-nothing is allowed --__xml_acl_post_process: Creation of acl_permission=betteridea-resources is allowed -+__xml_acl_post_process: Creation of acl_permission=observer-read-1 is allowed -+__xml_acl_post_process: Creation of acl_permission=observer-write-1 is allowed -+__xml_acl_post_process: Creation of acl_permission=observer-write-2 is allowed -+__xml_acl_post_process: Creation of acl_permission=admin-read-1 is allowed -+__xml_acl_post_process: Creation of acl_permission=admin-write-1 is allowed -+__xml_acl_post_process: Creation of acl_target=l33t-haxor is allowed -+__xml_acl_post_process: Creation of role=auto-l33t-haxor is allowed -+__xml_acl_post_process: Creation of acl_role=auto-l33t-haxor is allowed -+__xml_acl_post_process: Creation of acl_permission=crook-nothing is allowed -+__xml_acl_post_process: Creation of acl_target=niceguy is allowed -+__xml_acl_post_process: Creation of role=observer is allowed -+__xml_acl_post_process: Creation of acl_target=bob is allowed -+__xml_acl_post_process: Creation of role=admin is allowed -+__xml_acl_post_process: Creation of acl_target=badidea is allowed -+__xml_acl_post_process: Creation of role=auto-badidea is allowed -+__xml_acl_post_process: Creation of acl_role=auto-badidea is allowed -+__xml_acl_post_process: Creation of acl_permission=badidea-resources is allowed -+__xml_acl_post_process: Creation of acl_target=betteridea is allowed -+__xml_acl_post_process: Creation of role=auto-betteridea is allowed -+__xml_acl_post_process: Creation of acl_role=auto-betteridea is allowed -+__xml_acl_post_process: Creation of acl_permission=betteridea-nothing is allowed -+__xml_acl_post_process: Creation of acl_permission=betteridea-resources is allowed - =#=#=#= Current cib after: root: Upgrade to pacemaker-2.0 =#=#=#= - - -@@ -1271,10 +1273,10 @@ Error performing operation: Permission denied - =#=#=#= End test: unknownguy: Set stonith-enabled - Permission denied (13) =#=#=#= - * Passed: crm_attribute - 
unknownguy: Set stonith-enabled - =#=#=#= Begin test: unknownguy: Create a resource =#=#=#= --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs --__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs -+__xml_acl_check: Ordinary user unknownguy cannot access the CIB without any defined ACLs - Call failed: Permission denied - =#=#=#= End test: unknownguy: Create a resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - unknownguy: Create a resource -@@ -1291,8 +1293,8 @@ Error performing operation: Permission denied - =#=#=#= End test: l33t-haxor: Set stonith-enabled - Permission denied (13) =#=#=#= - * Passed: crm_attribute - l33t-haxor: Set stonith-enabled - =#=#=#= Begin test: l33t-haxor: Create a resource =#=#=#= --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent --__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: parent -+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] - Call failed: Permission denied - =#=#=#= End test: l33t-haxor: Create a resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - l33t-haxor: Create a resource -@@ -1351,7 +1353,7 @@ Call failed: Permission denied - =#=#=#= End test: niceguy: Query configuration - OK (0) =#=#=#= - * Passed: cibadmin - niceguy: Query configuration - =#=#=#= Begin test: niceguy: Set enable-acl =#=#=#= --__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default -+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default - Error performing operation: Permission denied - Error setting enable-acl=false (section=crm_config, set=): Permission denied - =#=#=#= End test: niceguy: Set enable-acl - Permission denied (13) =#=#=#= -@@ -1412,8 +1414,8 @@ Error setting enable-acl=false (section=crm_config, set=): Permission deni - =#=#=#= End test: niceguy: Set stonith-enabled - OK (0) =#=#=#= - * Passed: crm_attribute - niceguy: Set stonith-enabled - =#=#=#= Begin test: niceguy: Create a resource =#=#=#= --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default --__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy']: default -+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy'] - Call failed: Permission denied - =#=#=#= End test: niceguy: Create a resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Create a resource -@@ -1596,10 +1598,11 @@ Error 
performing operation: Permission denied - =#=#=#= End test: l33t-haxor: Remove a resource meta attribute - Permission denied (13) =#=#=#= - * Passed: crm_resource - l33t-haxor: Remove a resource meta attribute - =#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity --__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Stopped - =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#= - - -@@ -1661,9 +1664,9 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is - =#=#=#= End test: niceguy: Create a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - niceguy: Create a resource meta attribute - =#=#=#= Begin test: niceguy: Query a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity - Stopped - =#=#=#= Current cib after: niceguy: Query a resource meta attribute =#=#=#= - -@@ -1726,10 +1729,10 @@ Stopped - =#=#=#= End test: niceguy: Query a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - niceguy: Query a resource meta attribute - =#=#=#= Begin test: niceguy: Remove a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity --Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+Deleted 'dummy' option: id=dummy-meta_attributes-target-role name=target-role - =#=#=#= Current cib after: niceguy: Remove a resource meta attribute =#=#=#= - - -@@ -1789,10 +1792,11 @@ Deleted dummy option: id=dummy-meta_attributes-target-role name=target-role - =#=#=#= End test: niceguy: Remove a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - niceguy: 
Remove a resource meta attribute - =#=#=#= Begin test: niceguy: Create a resource meta attribute =#=#=#= --error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined --error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option --error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity --__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined -+error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option -+error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity -+__xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is allowed -+Set 'dummy' option: id=dummy-meta_attributes-target-role set=dummy-meta_attributes name=target-role=Started - =#=#=#= Current cib after: niceguy: Create a resource meta attribute =#=#=#= - - -@@ -1903,8 +1907,8 @@ __xml_acl_post_process: Creation of nvpair=dummy-meta_attributes-target-role is - - - =#=#=#= Begin test: niceguy: Replace - remove acls =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/acls: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/acls: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - remove acls - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - remove acls -@@ -1967,9 +1971,9 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - create resource =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default --__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2'] -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy2']: default -+__xml_acl_post_process: Cannot add new node primitive at /cib/configuration/resources/primitive[@id='dummy2'] - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - create resource - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - create resource -@@ -2031,8 +2035,8 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - modify attribute (deny) =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl'][@value]: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - modify attribute (deny) - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - modify attribute (deny) -@@ -2094,8 +2098,8 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - delete attribute (deny) =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default 
--__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']/nvpair[@id='cib-bootstrap-options-enable-acl']: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - delete attribute (deny) - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - delete attribute (deny) -@@ -2157,8 +2161,8 @@ Call failed: Permission denied - - - =#=#=#= Begin test: niceguy: Replace - create attribute (deny) =#=#=#= --__xml_acl_check: 400 access denied to /cib[@epoch]: default --__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default -+__xml_acl_check: 400 access denied to /cib[@epoch]: default -+__xml_acl_check: 400 access denied to /cib/configuration/resources/primitive[@id='dummy'][@description]: default - Call failed: Permission denied - =#=#=#= End test: niceguy: Replace - create attribute (deny) - Permission denied (13) =#=#=#= - * Passed: cibadmin - niceguy: Replace - create attribute (deny) -diff --git a/tools/regression.tools.exp b/tools/regression.tools.exp -index 287caf9..b2f4df1 100644 ---- a/tools/regression.tools.exp -+++ b/tools/regression.tools.exp -@@ -626,6 +626,7 @@ Deleted nodes attribute: id=nodes-node1-standby name=standby - =#=#=#= End test: Create a resource - OK (0) =#=#=#= - * Passed: cibadmin - Create a resource - =#=#=#= Begin test: Create a resource meta attribute =#=#=#= -+Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed=false - =#=#=#= Current cib after: Create a resource meta attribute =#=#=#= - - -@@ -695,7 +696,7 @@ false - =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - Query a resource meta attribute - =#=#=#= Begin test: Remove a resource meta attribute =#=#=#= --Deleted dummy option: id=dummy-meta_attributes-is-managed name=is-managed -+Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed - =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#= - - -@@ -728,6 +729,7 @@ Deleted dummy option: id=dummy-meta_attributes-is-managed name=is-managed - =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#= - * Passed: crm_resource - Remove a resource meta attribute - =#=#=#= Begin test: Create a resource attribute =#=#=#= -+Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay=10s - =#=#=#= Current cib after: Create a resource attribute =#=#=#= - - -@@ -763,7 +765,7 @@ Deleted dummy option: id=dummy-meta_attributes-is-managed name=is-managed - =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#= - * Passed: crm_resource - Create a resource attribute - =#=#=#= Begin test: List the configured resources =#=#=#= -- dummy (ocf::pacemaker:Dummy): Stopped -+ dummy (ocf::pacemaker:Dummy): Stopped - =#=#=#= Current cib after: List the configured resources =#=#=#= - - -@@ -973,8 +975,8 @@ Error performing operation: No such device or address - Current cluster status: - Online: [ node1 ] - -- dummy (ocf::pacemaker:Dummy): Stopped -- Fence (stonith:fence_true): Stopped -+ dummy (ocf::pacemaker:Dummy): Stopped -+ Fence (stonith:fence_true): Stopped - - Transition Summary: - * Start dummy (node1) -@@ -990,8 
+992,8 @@ Executing cluster transition: - Revised cluster status: - Online: [ node1 ] - -- dummy (ocf::pacemaker:Dummy): Started node1 -- Fence (stonith:fence_true): Started node1 -+ dummy (ocf::pacemaker:Dummy): Started node1 -+ Fence (stonith:fence_true): Started node1 - - =#=#=#= Current cib after: Bring resources online =#=#=#= - -@@ -1710,8 +1712,8 @@ Error performing operation: No such device or address - Current cluster status: - Online: [ node1 ] - -- dummy (ocf::pacemaker:Dummy): Started node1 -- Fence (stonith:fence_true): Started node1 -+ dummy (ocf::pacemaker:Dummy): Started node1 -+ Fence (stonith:fence_true): Started node1 - - Performing requested modifications - + Bringing node node2 online -@@ -1733,8 +1735,8 @@ Executing cluster transition: - Revised cluster status: - Online: [ node1 node2 node3 ] - -- dummy (ocf::pacemaker:Dummy): Started node1 -- Fence (stonith:fence_true): Started node2 -+ dummy (ocf::pacemaker:Dummy): Started node1 -+ Fence (stonith:fence_true): Started node2 - - =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#= - -@@ -1996,8 +1998,8 @@ WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score - Current cluster status: - Online: [ node1 node2 node3 ] - -- dummy (ocf::pacemaker:Dummy): Started node1 -- Fence (stonith:fence_true): Started node2 -+ dummy (ocf::pacemaker:Dummy): Started node1 -+ Fence (stonith:fence_true): Started node2 - - Transition Summary: - * Move dummy (Started node1 -> node3) -@@ -2010,8 +2012,8 @@ Executing cluster transition: - Revised cluster status: - Online: [ node1 node2 node3 ] - -- dummy (ocf::pacemaker:Dummy): Started node3 -- Fence (stonith:fence_true): Started node2 -+ dummy (ocf::pacemaker:Dummy): Started node3 -+ Fence (stonith:fence_true): Started node2 - - =#=#=#= Current cib after: Relocate resources due to ban =#=#=#= - -diff --git a/valgrind-pcmk.suppressions b/valgrind-pcmk.suppressions -index 2e382df..0a47096 100644 ---- a/valgrind-pcmk.suppressions -+++ b/valgrind-pcmk.suppressions -@@ -1,4 +1,4 @@ --# Valgrind suppressions for PE testing -+# Valgrind suppressions for Pacemaker testing - { - Valgrind bug - Memcheck:Addr8 -@@ -57,6 +57,15 @@ - } - - { -+ Cman - Who cares if unused bytes are uninitialized -+ Memcheck:Param -+ sendmsg(msg) -+ fun:__sendmsg_nocancel -+ obj:*/libcman.so.3.0 -+ obj:*/libcman.so.3.0 -+} -+ -+{ - Cman - Jump or move depends on uninitialized values - Memcheck:Cond - obj:*/libcman.so.3.0 diff --git a/SOURCES/pacemaker-rollup-7-1-3d781d3.patch b/SOURCES/pacemaker-rollup-7-1-3d781d3.patch deleted file mode 100644 index 30afd6d..0000000 --- a/SOURCES/pacemaker-rollup-7-1-3d781d3.patch +++ /dev/null @@ -1,7989 +0,0 @@ -diff --git a/cib/io.c b/cib/io.c -index e2873a8..4e2b24a 100644 ---- a/cib/io.c -+++ b/cib/io.c -@@ -254,9 +254,7 @@ readCibXmlFile(const char *dir, const char *file, gboolean discard_status) - if (cib_writes_enabled && use_valgrind) { - if (crm_is_true(use_valgrind) || strstr(use_valgrind, "cib")) { - cib_writes_enabled = FALSE; -- crm_err("*********************************************************"); - crm_err("*** Disabling disk writes to avoid confusing Valgrind ***"); -- crm_err("*********************************************************"); - } - } - -diff --git a/crmd/crmd_lrm.h b/crmd/crmd_lrm.h -index 81a53c5..78432df 100644 ---- a/crmd/crmd_lrm.h -+++ b/crmd/crmd_lrm.h -@@ -37,6 +37,8 @@ typedef struct resource_history_s { - GHashTable *stop_params; - } rsc_history_t; - -+void history_free(gpointer data); -+ - /* TDOD - 
Replace this with lrmd_event_data_t */ - struct recurring_op_s { - int call_id; -diff --git a/crmd/lrm.c b/crmd/lrm.c -index 062f769..418e7cf 100644 ---- a/crmd/lrm.c -+++ b/crmd/lrm.c -@@ -103,6 +103,80 @@ copy_meta_keys(gpointer key, gpointer value, gpointer user_data) - } - } - -+/* -+ * \internal -+ * \brief Remove a recurring operation from a resource's history -+ * -+ * \param[in,out] history Resource history to modify -+ * \param[in] op Operation to remove -+ * -+ * \return TRUE if the operation was found and removed, FALSE otherwise -+ */ -+static gboolean -+history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op) -+{ -+ GList *iter; -+ -+ for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { -+ lrmd_event_data_t *existing = iter->data; -+ -+ if ((op->interval == existing->interval) -+ && crm_str_eq(op->rsc_id, existing->rsc_id, TRUE) -+ && safe_str_eq(op->op_type, existing->op_type)) { -+ -+ history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter); -+ lrmd_free_event(existing); -+ return TRUE; -+ } -+ } -+ return FALSE; -+} -+ -+/* -+ * \internal -+ * \brief Free all recurring operations in resource history -+ * -+ * \param[in,out] history Resource history to modify -+ */ -+static void -+history_free_recurring_ops(rsc_history_t *history) -+{ -+ GList *iter; -+ -+ for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) { -+ lrmd_free_event(iter->data); -+ } -+ g_list_free(history->recurring_op_list); -+ history->recurring_op_list = NULL; -+} -+ -+/* -+ * \internal -+ * \brief Free resource history -+ * -+ * \param[in,out] history Resource history to free -+ */ -+void -+history_free(gpointer data) -+{ -+ rsc_history_t *history = (rsc_history_t*)data; -+ -+ if (history->stop_params) { -+ g_hash_table_destroy(history->stop_params); -+ } -+ -+ /* Don't need to free history->rsc.id because it's set to history->id */ -+ free(history->rsc.type); -+ free(history->rsc.class); -+ free(history->rsc.provider); -+ -+ lrmd_free_event(history->failed); -+ lrmd_free_event(history->last); -+ free(history->id); -+ history_free_recurring_ops(history); -+ free(history); -+} -+ - static void - update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op) - { -@@ -145,25 +219,10 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_ - target_rc = rsc_op_expected_rc(op); - if (op->op_status == PCMK_LRM_OP_CANCELLED) { - if (op->interval > 0) { -- GList *gIter, *gIterNext; -- - crm_trace("Removing cancelled recurring op: %s_%s_%d", op->rsc_id, op->op_type, - op->interval); -- -- for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIterNext) { -- lrmd_event_data_t *existing = gIter->data; -- -- gIterNext = gIter->next; -- -- if (crm_str_eq(op->rsc_id, existing->rsc_id, TRUE) -- && safe_str_eq(op->op_type, existing->op_type) -- && op->interval == existing->interval) { -- lrmd_free_event(existing); -- entry->recurring_op_list = g_list_delete_link(entry->recurring_op_list, gIter); -- } -- } -+ history_remove_recurring_op(entry, op); - return; -- - } else { - crm_trace("Skipping %s_%s_%d rc=%d, status=%d", op->rsc_id, op->op_type, op->interval, - op->rc, op->op_status); -@@ -201,32 +260,17 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_ - } - - if (op->interval > 0) { -- GListPtr iter = NULL; -- -- for(iter = entry->recurring_op_list; iter; iter = iter->next) { -- lrmd_event_data_t *o = iter->data; -- -- /* op->rsc_id is 
implied */ -- if(op->interval == o->interval && strcmp(op->op_type, o->op_type) == 0) { -- crm_trace("Removing existing recurring op entry: %s_%s_%d", op->rsc_id, op->op_type, op->interval); -- entry->recurring_op_list = g_list_remove(entry->recurring_op_list, o); -- break; -- } -- } -+ /* Ensure there are no duplicates */ -+ history_remove_recurring_op(entry, op); - - crm_trace("Adding recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval); - entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op)); - - } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) { -- GList *gIter = entry->recurring_op_list; -- - crm_trace("Dropping %d recurring ops because of: %s_%s_%d", -- g_list_length(gIter), op->rsc_id, op->op_type, op->interval); -- for (; gIter != NULL; gIter = gIter->next) { -- lrmd_free_event(gIter->data); -- } -- g_list_free(entry->recurring_op_list); -- entry->recurring_op_list = NULL; -+ g_list_length(entry->recurring_op_list), op->rsc_id, -+ op->op_type, op->interval); -+ history_free_recurring_ops(entry); - } - } - -diff --git a/crmd/lrm_state.c b/crmd/lrm_state.c -index 374c806..162ad03 100644 ---- a/crmd/lrm_state.c -+++ b/crmd/lrm_state.c -@@ -32,24 +32,6 @@ int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg); - void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg)); - - static void --history_cache_destroy(gpointer data) --{ -- rsc_history_t *entry = data; -- -- if (entry->stop_params) { -- g_hash_table_destroy(entry->stop_params); -- } -- -- free(entry->rsc.type); -- free(entry->rsc.class); -- free(entry->rsc.provider); -- -- lrmd_free_event(entry->failed); -- lrmd_free_event(entry->last); -- free(entry->id); -- free(entry); --} --static void - free_rsc_info(gpointer value) - { - lrmd_rsc_info_t *rsc_info = value; -@@ -155,7 +137,7 @@ lrm_state_create(const char *node_name) - g_str_equal, g_hash_destroy_str, free_recurring_op); - - state->resource_history = g_hash_table_new_full(crm_str_hash, -- g_str_equal, NULL, history_cache_destroy); -+ g_str_equal, NULL, history_free); - - g_hash_table_insert(lrm_state_table, (char *)state->node_name, state); - return state; -diff --git a/cts/CM_ais.py b/cts/CM_ais.py -index 44f91cd..a34f9b1 100644 ---- a/cts/CM_ais.py -+++ b/cts/CM_ais.py -@@ -49,42 +49,46 @@ class crm_ais(crm_lha): - def NodeUUID(self, node): - return node - -- def ais_components(self): -+ def ais_components(self, extra={}): - - complist = [] - if not len(self.fullcomplist.keys()): - for c in ["cib", "lrmd", "crmd", "attrd" ]: -- self.fullcomplist[c] = Process( -- self, c, -- pats = self.templates.get_component(self.name, c), -- badnews_ignore = self.templates.get_component(self.name, "%s-ignore"%c), -- common_ignore = self.templates.get_component(self.name, "common-ignore")) -- -- self.fullcomplist["pengine"] = Process( -- self, "pengine", -- dc_pats = self.templates.get_component(self.name, "pengine"), -- badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"), -- common_ignore = self.templates.get_component(self.name, "common-ignore")) -- -- self.fullcomplist["stonith-ng"] = Process( -- self, "stonith-ng", process="stonithd", -- pats = self.templates.get_component(self.name, "stonith"), -- badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"), -- common_ignore = self.templates.get_component(self.name, "common-ignore")) -- -+ self.fullcomplist[c] = Process( -+ self, c, -+ pats 
= self.templates.get_component(self.name, c), -+ badnews_ignore = self.templates.get_component(self.name, "%s-ignore" % c), -+ common_ignore = self.templates.get_component(self.name, "common-ignore")) -+ -+ # pengine uses dc_pats instead of pats -+ self.fullcomplist["pengine"] = Process( -+ self, "pengine", -+ dc_pats = self.templates.get_component(self.name, "pengine"), -+ badnews_ignore = self.templates.get_component(self.name, "pengine-ignore"), -+ common_ignore = self.templates.get_component(self.name, "common-ignore")) -+ -+ # stonith-ng's process name is different from its component name -+ self.fullcomplist["stonith-ng"] = Process( -+ self, "stonith-ng", process="stonithd", -+ pats = self.templates.get_component(self.name, "stonith"), -+ badnews_ignore = self.templates.get_component(self.name, "stonith-ignore"), -+ common_ignore = self.templates.get_component(self.name, "common-ignore")) -+ -+ # add (or replace) any extra components passed in -+ self.fullcomplist.update(extra) -+ -+ # Processes running under valgrind can't be shot with "killall -9 processname", -+ # so don't include them in the returned list - vgrind = self.Env["valgrind-procs"].split() - for key in self.fullcomplist.keys(): - if self.Env["valgrind-tests"]: -- if key in vgrind: -- # Processes running under valgrind can't be shot with "killall -9 processname" -+ if key in vgrind: - self.log("Filtering %s from the component list as it is being profiled by valgrind" % key) - continue - if key == "stonith-ng" and not self.Env["DoFencing"]: - continue -- - complist.append(self.fullcomplist[key]) - -- #self.complist = [ fullcomplist["pengine"] ] - return complist - - -@@ -100,17 +104,14 @@ class crm_cs_v0(crm_ais): - crm_ais.__init__(self, Environment, randseed=randseed, name=name) - - def Components(self): -- self.ais_components() -- c = "corosync" -- -- self.fullcomplist[c] = Process( -- self, c, -- pats = self.templates.get_component(self.name, c), -- badnews_ignore = self.templates.get_component(self.name, "%s-ignore"%c), -+ extra = {} -+ extra["corosync"] = Process( -+ self, "corosync", -+ pats = self.templates.get_component(self.name, "corosync"), -+ badnews_ignore = self.templates.get_component(self.name, "corosync-ignore"), - common_ignore = self.templates.get_component(self.name, "common-ignore") - ) -- -- return self.ais_components() -+ return self.ais_components(extra=extra) - - - class crm_cs_v1(crm_cs_v0): -diff --git a/cts/environment.py b/cts/environment.py -index a3399c3..61d4211 100644 ---- a/cts/environment.py -+++ b/cts/environment.py -@@ -59,7 +59,7 @@ class Environment: - self["stonith-params"] = "hostlist=all,livedangerously=yes" - self["loop-minutes"] = 60 - self["valgrind-prefix"] = None -- self["valgrind-procs"] = "cib crmd attrd pengine stonith-ng" -+ self["valgrind-procs"] = "attrd cib crmd lrmd pengine stonith-ng" - self["valgrind-opts"] = """--leak-check=full --show-reachable=yes --trace-children=no --num-callers=25 --gen-suppressions=all --suppressions="""+CTSvars.CTS_home+"""/cts.supp""" - - self["experimental-tests"] = 0 -@@ -578,6 +578,10 @@ class Environment: - elif args[i] == "--valgrind-tests": - self["valgrind-tests"] = 1 - -+ elif args[i] == "--valgrind-procs": -+ self["valgrind-procs"] = args[i+1] -+ skipthis = 1 -+ - elif args[i] == "--no-loop-tests": - self["loop-tests"] = 0 - -diff --git a/cts/patterns.py b/cts/patterns.py -index 1bc05a6..493b690 100644 ---- a/cts/patterns.py -+++ b/cts/patterns.py -@@ -7,7 +7,9 @@ class BasePatterns: - def __init__(self, name): - self.name = name 
- patternvariants[name] = self -- self.ignore = [] -+ self.ignore = [ -+ "avoid confusing Valgrind", -+ ] - self.BadNews = [] - self.components = {} - self.commands = { -@@ -140,7 +142,7 @@ class crm_lha(BasePatterns): - r"Parameters to .* changed", - ] - -- self.ignore = [ -+ self.ignore = self.ignore + [ - r"(ERROR|error):.*\s+assert\s+at\s+crm_glib_handler:" - "(ERROR|error): Message hist queue is filling up", - "stonithd.*CRIT: external_hostlist:.*'vmware gethosts' returned an empty hostlist", -@@ -177,7 +179,7 @@ class crm_cs_v0(BasePatterns): - "Pat:PacemakerUp" : "%s\W.*pacemakerd.*Starting Pacemaker", - }) - -- self.ignore = [ -+ self.ignore = self.ignore + [ - r"crm_mon:", - r"crmadmin:", - r"update_trace_data", -diff --git a/extra/ansible/docker/group_vars/all b/extra/ansible/docker/group_vars/all -new file mode 100644 -index 0000000..935e88a ---- /dev/null -+++ b/extra/ansible/docker/group_vars/all -@@ -0,0 +1,5 @@ -+max: 4 -+prefix: ansible-pcmk -+base_image: centos:centos7 -+subnet: 172.17.200 -+pacemaker_authkey: this_is_very_insecure -\ No newline at end of file -diff --git a/extra/ansible/docker/hosts b/extra/ansible/docker/hosts -new file mode 100644 -index 0000000..5b0fb71 ---- /dev/null -+++ b/extra/ansible/docker/hosts -@@ -0,0 +1,7 @@ -+[controllers] -+oss-uk-1.clusterlabs.org -+ -+[containers] -+ansible-1 -+ansible-2 -+ansible-3 -diff --git a/extra/ansible/docker/roles/docker-host/files/docker-enter b/extra/ansible/docker/roles/docker-host/files/docker-enter -new file mode 100644 -index 0000000..04c4822 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/files/docker-enter -@@ -0,0 +1,29 @@ -+#! /bin/sh -e -+ -+case "$1" in -+ -h|--help) -+ echo "Usage: docker-enter CONTAINER [COMMAND]" -+ exit 0 -+ ;; -+esac -+ -+if [ $(id -ru) -ne 0 ]; then -+ echo "You have to be root." -+ exit 1 -+fi -+ -+if [ $# -eq 0 ]; then -+ echo "Usage: docker-enter CONTAINER [COMMAND]" -+ exit 1 -+fi -+ -+container=$1; shift -+PID=$(docker inspect --format {{.State.Pid}} "$container") -+ -+if [ $# -ne 0 ]; then -+ nsenter --target $PID --mount --uts --ipc --net --pid -- $* -+ exit $? -+fi -+ -+nsenter --target $PID --mount --uts --ipc --net --pid -+exit 0 -diff --git a/extra/ansible/docker/roles/docker-host/files/fence_docker_cts b/extra/ansible/docker/roles/docker-host/files/fence_docker_cts -new file mode 100644 -index 0000000..6d6f025 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/files/fence_docker_cts -@@ -0,0 +1,202 @@ -+#!/bin/bash -+# -+# Copyright (c) 2014 David Vossel -+# All Rights Reserved. -+# -+# This program is free software; you can redistribute it and/or modify -+# it under the terms of version 2 of the GNU General Public License as -+# published by the Free Software Foundation. -+# -+# This program is distributed in the hope that it would be useful, but -+# WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -+# -+# Further, this software is distributed without any warranty that it is -+# free of the rightful claim of any third person regarding infringement -+# or the like. Any license provided herein, whether implied or -+# otherwise, applies only to this software file. Patent licenses, if -+# any, provided herein do not apply to combinations of this program with -+# other software, or any other product whatsoever. 
-+# -+# You should have received a copy of the GNU General Public License -+# along with this program; if not, write the Free Software Foundation, -+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. -+# -+####################################################################### -+ -+port="" -+action="list" # Default fence action -+ -+function usage() -+{ -+cat < -+ -+ -+ fence_docker_cts fences docker containers for testing purposes. -+ -+ -+ -+ -+ -+ Fencing Action -+ -+ -+ -+ -+ The name/id of docker container to control/check -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+EOF -+ exit 0; -+} -+ -+function docker_log() { -+ if ! [ "$action" = "list" ]; then -+ printf "$*\n" 1>&2 -+ fi -+} -+ -+# stdin option processing -+if [ -z $1 ]; then -+ # If there are no command line args, look for options from stdin -+ while read line; do -+ for word in $(echo "$line"); do -+ case $word in -+ option=*|action=*) action=`echo $word | sed s/.*=//`;; -+ port=*) port=`echo $word | sed s/.*=//`;; -+ node=*) port=`echo $word | sed s/.*=//`;; -+ nodename=*) port=`echo $word | sed s/.*=//`;; -+ --);; -+ *) docker_log "Invalid command: $word";; -+ esac -+ done -+ done -+fi -+ -+# Command line option processing -+while true ; do -+ if [ -z "$1" ]; then -+ break; -+ fi -+ case "$1" in -+ -o|--action|--option) action=$2; shift; shift;; -+ -n|--port) port=$2; shift; shift;; -+ -V|--version) echo "1.0.0"; exit 0;; -+ --help|-h) -+ usage; -+ exit 0;; -+ --) shift ; break ;; -+ *) docker_log "Unknown option: $1. See --help for details."; exit 1;; -+ esac -+done -+ -+action=`echo $action | tr 'A-Z' 'a-z'` -+case $action in -+ hostlist|list) action=list;; -+ stat|status) action=status;; -+ restart|reboot|reset) action=reboot;; -+ poweron|on) action=start;; -+ poweroff|off) action=stop;; -+esac -+ -+function fence_done() -+{ -+ if [ $1 -eq 0 ]; then -+ docker_log "Operation $action (port=$port) passed" -+ else -+ docker_log "Operation $action (port=$port) failed: $1" -+ fi -+ if [ -z "$returnfile" ]; then -+ rm -f $returnfile -+ fi -+ if [ -z "$helperscript" ]; then -+ rm -f $helperscript -+ fi -+ exit $1 -+} -+ -+case $action in -+ metadata) metadata;; -+esac -+ -+returnfile=$(mktemp /tmp/fence_docker_cts_returnfileXXXX) -+returnstring="" -+helper_script=$(mktemp /tmp/fence_docker_cts_helperXXXX) -+ -+exec_action() -+{ -+ echo "#!/bin/bash" > $helper_script -+ echo "sleep 10000" >> $helper_script -+ chmod 755 $helper_script -+ src="$(uname -n)" -+ -+ $helper_script "$src" "$action" "$returnfile" "$port" > /dev/null 2>&1 & -+ pid=$! -+ docker_log "waiting on pid $pid" -+ wait $pid > /dev/null 2>&1 -+ returnstring=$(cat $returnfile) -+ -+ if [ -z "$returnstring" ]; then -+ docker_log "fencing daemon did not respond" -+ fence_done 1 -+ fi -+ -+ if [ "$returnstring" == "fail" ]; then -+ docker_log "fencing daemon failed to execute action [$action on port $port]" -+ fence_done 1 -+ fi -+ -+ return 0 -+} -+ -+exec_action -+case $action in -+ list) -+ cat $returnfile -+ fence_done 0 -+ ;; -+ -+ status) -+ # 0 if container is on -+ # 1 if container can not be contacted or unknown -+ # 2 if container is off -+ if [ "$returnstring" = "true" ]; then -+ fence_done 0 -+ else -+ fence_done 2 -+ fi -+ ;; -+ monitor|stop|start|reboot) : ;; -+ *) docker_log "Unknown action: $action"; fence_done 1;; -+esac -+ -+fence_done $? 
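The agent above follows the usual fence-agent calling conventions its own parser implements: an action and target container can be passed as command-line flags, or as key=value pairs on stdin the way stonithd drives agents. A minimal sketch of both styles (the container name is hypothetical):

    # command-line style
    fence_docker_cts -o list
    fence_docker_cts -o reboot -n ansible-pcmk-1

    # stdin style, as the fencing daemon would invoke it
    printf 'action=off\nport=ansible-pcmk-1\n' | fence_docker_cts

Note that the case statement above normalizes aliases (poweroff/off both become the stop action), and docker_log suppresses its stderr messages when the action is list, so the returned hostlist is the only output on stdout.
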
-diff --git a/extra/ansible/docker/roles/docker-host/files/launch.sh b/extra/ansible/docker/roles/docker-host/files/launch.sh -new file mode 100644 -index 0000000..66bebf4 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/files/launch.sh -@@ -0,0 +1,4 @@ -+#!/bin/bash -+while true; do -+ sleep 1 -+done -diff --git a/extra/ansible/docker/roles/docker-host/files/pcmk_remote_start b/extra/ansible/docker/roles/docker-host/files/pcmk_remote_start -new file mode 100644 -index 0000000..1bf0320 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/files/pcmk_remote_start -@@ -0,0 +1,18 @@ -+#!/bin/bash -+/usr/sbin/ip_start -+pid=$(pidof pacemaker_remoted) -+if [ "$?" -ne 0 ]; then -+ mkdir -p /var/run -+ -+ export PCMK_debugfile=$pcmklogs -+ (pacemaker_remoted &) & > /dev/null 2>&1 -+ sleep 5 -+ -+ pid=$(pidof pacemaker_remoted) -+ if [ "$?" -ne 0 ]; then -+ echo "startup of pacemaker failed" -+ exit 1 -+ fi -+ echo "$pid" > /var/run/pacemaker_remoted.pid -+fi -+exit 0 -diff --git a/extra/ansible/docker/roles/docker-host/files/pcmk_remote_stop b/extra/ansible/docker/roles/docker-host/files/pcmk_remote_stop -new file mode 100644 -index 0000000..074cd59 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/files/pcmk_remote_stop -@@ -0,0 +1,36 @@ -+#!/bin/bash -+status() -+{ -+ pid=$(pidof $1 2>/dev/null) -+ rtrn=$? -+ if [ $rtrn -ne 0 ]; then -+ echo "$1 is stopped" -+ else -+ echo "$1 (pid $pid) is running..." -+ fi -+ return $rtrn -+} -+stop() -+{ -+ desc="Pacemaker Remote" -+ prog=$1 -+ shutdown_prog=$prog -+ -+ if status $shutdown_prog > /dev/null 2>&1; then -+ kill -TERM $(pidof $prog) > /dev/null 2>&1 -+ -+ while status $prog > /dev/null 2>&1; do -+ sleep 1 -+ echo -n "." -+ done -+ else -+ echo -n "$desc is already stopped" -+ fi -+ -+ rm -f /var/lock/subsystem/pacemaker -+ rm -f /var/run/${prog}.pid -+ killall -q -9 'crmd stonithd attrd cib lrmd pacemakerd pacemaker_remoted' -+} -+ -+stop "pacemaker_remoted" -+exit 0 -diff --git a/extra/ansible/docker/roles/docker-host/files/pcmk_start b/extra/ansible/docker/roles/docker-host/files/pcmk_start -new file mode 100644 -index 0000000..d8b2ba8 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/files/pcmk_start -@@ -0,0 +1,23 @@ -+#!/bin/bash -+ -+/usr/sbin/ip_start -+sed -i 's@to_syslog:.*yes@to_logfile: yes\nlogfile: /var/log/pacemaker.log@g' /etc/corosync/corosync.conf -+ -+/usr/share/corosync/corosync start > /dev/null 2>&1 -+ -+pid=$(pidof pacemakerd) -+if [ "$?" -ne 0 ]; then -+ mkdir -p /var/run -+ -+ export PCMK_debugfile=$pcmklogs -+ (pacemakerd &) & > /dev/null 2>&1 -+ sleep 5 -+ -+ pid=$(pidof pacemakerd) -+ if [ "$?" -ne 0 ]; then -+ echo "startup of pacemaker failed" -+ exit 1 -+ fi -+ echo "$pid" > /var/run/pacemakerd.pid -+fi -+exit 0 -diff --git a/extra/ansible/docker/roles/docker-host/files/pcmk_stop b/extra/ansible/docker/roles/docker-host/files/pcmk_stop -new file mode 100644 -index 0000000..a8f395a ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/files/pcmk_stop -@@ -0,0 +1,45 @@ -+#!/bin/bash -+status() -+{ -+ pid=$(pidof $1 2>/dev/null) -+ rtrn=$? -+ if [ $rtrn -ne 0 ]; then -+ echo "$1 is stopped" -+ else -+ echo "$1 (pid $pid) is running..." -+ fi -+ return $rtrn -+} -+stop() -+{ -+ desc="Pacemaker Cluster Manager" -+ prog=$1 -+ shutdown_prog=$prog -+ -+ if ! 
status $prog > /dev/null 2>&1; then -+ shutdown_prog="crmd" -+ fi -+ -+ cname=$(crm_node --name) -+ crm_attribute -N $cname -n standby -v true -l reboot -+ -+ if status $shutdown_prog > /dev/null 2>&1; then -+ kill -TERM $(pidof $prog) > /dev/null 2>&1 -+ -+ while status $prog > /dev/null 2>&1; do -+ sleep 1 -+ echo -n "." -+ done -+ else -+ echo -n "$desc is already stopped" -+ fi -+ -+ rm -f /var/lock/subsystem/pacemaker -+ rm -f /var/run/${prog}.pid -+ killall -q -9 'crmd stonithd attrd cib lrmd pacemakerd pacemaker_remoted' -+} -+ -+stop "pacemakerd" -+/usr/share/corosync/corosync stop > /dev/null 2>&1 -+killall -q -9 'corosync' -+exit 0 -diff --git a/extra/ansible/docker/roles/docker-host/tasks/main.yml b/extra/ansible/docker/roles/docker-host/tasks/main.yml -new file mode 100644 -index 0000000..ce69adf ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/tasks/main.yml -@@ -0,0 +1,77 @@ -+--- -+#local_action: command /usr/bin/take_out_of_pool {{ inventory_hostname }} -+- name: Update docker -+ yum: pkg=docker state=latest -+- name: Start docker -+ service: name=docker state=started enabled=yes -+- name: Install helper -+ copy: src=docker-enter dest=/usr/sbin/ mode=0755 -+- name: Download image -+ shell: docker pull {{ base_image }} -+- name: Cleanup kill -+ shell: docker kill $(docker ps -a | grep {{ prefix }} | awk '{print $1}') || echo "Nothing to kill" -+- name: Cleanup remove -+ shell: docker rm $(docker ps -a | grep {{ prefix }} | awk '{print $1}') || echo "Nothing to remove" -+- name: Cleanup docker skeleton -+ file: path={{ prefix }} state=absent -+- name: Create docker skeleton -+ file: path={{ prefix }}/{{ item }} state=directory recurse=yes -+ with_items: -+ - rpms -+ - repos -+ - bin_files -+ - launch_scripts -+- name: Create IP helper -+ template: src=ip_start.j2 dest={{ prefix }}/bin_files/ip_start mode=0755 -+- name: Copy helper scripts -+ copy: src={{ item }} dest={{ prefix }}/bin_files/{{ item }} mode=0755 -+ with_items: -+ - pcmk_stop -+ - pcmk_start -+ - pcmk_remote_stop -+ - pcmk_remote_start -+ - fence_docker_cts -+- name: Copy launch script -+ copy: src=launch.sh dest={{ prefix }}/launch_scripts/launch.sh mode=0755 -+- name: Copy authorized keys -+ shell: cp /root/.ssh/authorized_keys {{ prefix }} -+- name: Create docker file -+ template: src=Dockerfile.j2 dest={{ prefix }}/Dockerfile -+- name: Making image -+ shell: docker build -t {{ prefix }} {{ prefix }} -+- name: Launch images -+ shell: docker run -d -i -t -P -h {{ prefix }}-{{ item }} --name={{ prefix }}-{{ item }} -p 2200{{ item }}:22 $(docker images | grep {{ prefix }}.*latest | awk '{print $3}') /bin/bash -+ with_sequence: count={{ max }} -+- name: Calculate IPs -+ shell: for n in $(seq {{ max }} ); do echo {{ subnet }}.${n}; done | tr '\n' ' ' -+ register: node_ips -+- name: Start the IP -+ shell: docker-enter {{ prefix }}-{{ item }} ip_start -+ with_sequence: count={{ max }} -+- name: Configure cluster -+ shell: docker-enter {{ prefix }}-{{ item }} pcs cluster setup --local --name {{ prefix }} {{ node_ips.stdout }} -+ with_sequence: count={{ max }} -+- name: Start the cluster -+ shell: docker-enter {{ prefix }}-{{ item }} pcmk_start -+ with_sequence: count={{ max }} -+- name: Set cluster options -+ shell: docker-enter {{ prefix }}-1 pcs property set stonith-enabled=false -+- name: Configure VIP -+ shell: docker-enter {{ prefix }}-1 pcs resource create ClusterIP ocf:heartbeat:IPaddr2 ip={{ subnet }}.100 cidr_netmask=32 op monitor interval=30s -+- name: Configure -+ shell: docker-enter {{ prefix 
}}-1 pcs resource defaults resource-stickiness=100 -+- name: Configure -+ shell: docker-enter {{ prefix }}-1 pcs resource create WebSite apache configfile=/etc/httpd/conf/httpd.conf statusurl="http://localhost/server-status" op monitor interval=1min -+- name: Configure -+ shell: docker-enter {{ prefix }}-1 pcs constraint colocation add WebSite with ClusterIP INFINITY -+- name: Configure -+ shell: docker-enter {{ prefix }}-1 pcs constraint order ClusterIP then WebSite -+- name: Configure -+ shell: docker-enter {{ prefix }}-1 pcs constraint location WebSite prefers {{ prefix }}-1=50 -+# TODO: Enable fencing -+# TODO: Make this a full LAMP stack similar to https://github.com/ansible/ansible-examples/tree/master/lamp_simple -+# TODO: Create a Pacemaker module? -+ -+# run_once: true -+# delegate_to: web01.example.org -+ -diff --git a/extra/ansible/docker/roles/docker-host/templates/Dockerfile.j2 b/extra/ansible/docker/roles/docker-host/templates/Dockerfile.j2 -new file mode 100644 -index 0000000..1d57175 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/templates/Dockerfile.j2 -@@ -0,0 +1,16 @@ -+FROM {{ base_image }} -+ADD /repos /etc/yum.repos.d/ -+#ADD /rpms /root/ -+#RUN yum install -y /root/*.rpm -+ADD /launch_scripts /root/ -+ADD /bin_files /usr/sbin/ -+ -+RUN mkdir -p /root/.ssh; chmod 700 /root/.ssh -+ADD authorized_keys /root/.ssh/ -+ -+RUN yum install -y openssh-server net-tools pacemaker pacemaker-cts resource-agents pcs corosync which fence-agents-common sysvinit-tools -+RUN mkdir -p /etc/pacemaker/ -+RUN echo {{ pacemaker_authkey }} > /etc/pacemaker/authkey -+RUN /usr/sbin/sshd -+ -+ENTRYPOINT ["/root/launch.sh"] -diff --git a/extra/ansible/docker/roles/docker-host/templates/ip_start.j2 b/extra/ansible/docker/roles/docker-host/templates/ip_start.j2 -new file mode 100755 -index 0000000..edbd392 ---- /dev/null -+++ b/extra/ansible/docker/roles/docker-host/templates/ip_start.j2 -@@ -0,0 +1,3 @@ -+offset=$(hostname | sed s/.*-//) -+export OCF_ROOT=/usr/lib/ocf/ OCF_RESKEY_ip={{ subnet }}.${offset} OCF_RESKEY_cidr_netmask=32 -+/usr/lib/ocf/resource.d/heartbeat/IPaddr2 start -diff --git a/extra/ansible/docker/site.yml b/extra/ansible/docker/site.yml -new file mode 100644 -index 0000000..0cc65e4 ---- /dev/null -+++ b/extra/ansible/docker/site.yml -@@ -0,0 +1,12 @@ -+--- -+# See /etc/ansible/hosts or -i hosts -+- hosts: controllers -+ remote_user: root -+ roles: -+ - docker-host -+ -+#- hosts: containers -+# gather_facts: no -+# remote_user: root -+# roles: -+# - docker-container -diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h -index 42f9003..15f1b3c 100644 ---- a/include/crm/msg_xml.h -+++ b/include/crm/msg_xml.h -@@ -194,6 +194,7 @@ - # define XML_RSC_ATTR_INTERLEAVE "interleave" - # define XML_RSC_ATTR_INCARNATION "clone" - # define XML_RSC_ATTR_INCARNATION_MAX "clone-max" -+# define XML_RSC_ATTR_INCARNATION_MIN "clone-min" - # define XML_RSC_ATTR_INCARNATION_NODEMAX "clone-node-max" - # define XML_RSC_ATTR_MASTER_MAX "master-max" - # define XML_RSC_ATTR_MASTER_NODEMAX "master-node-max" -diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h -index 4214959..b95b1e5 100644 ---- a/include/crm/pengine/status.h -+++ b/include/crm/pengine/status.h -@@ -256,7 +256,6 @@ struct resource_s { - int stickiness; - int sort_index; - int failure_timeout; -- int remote_reconnect_interval; - int effective_priority; - int migration_threshold; - -@@ -295,6 +294,7 @@ struct resource_s { - - const char *isolation_wrapper; - gboolean exclusive_discover; -+ int 
remote_reconnect_interval;
- };
- 
- struct pe_action_s {
-@@ -324,6 +324,26 @@ struct pe_action_s {
- GHashTable *meta;
- GHashTable *extra;
- 
-+ /*
-+ * These two variables are associated with the constraint logic
-+ * that involves first having one or more actions runnable before
-+ * then allowing this action to execute.
-+ *
-+ * These variables are used with features such as 'clone-min' which
-+ * requires at minimum X number of cloned instances to be running
-+ * before an order dependency can run. Another option that uses
-+ * this is 'require-all=false' in ordering constraints. This option
-+ * says "only require one instance of a resource to start before
-+ * allowing dependencies to start"; basically require-all=false is
-+ * the same as clone-min=1.
-+ */
-+
-+ /* current number of known runnable actions in the before list. */
-+ int runnable_before;
-+ /* the number of "before" runnable actions required for this action
-+ * to be considered runnable */
-+ int required_runnable_before;
-+
- GListPtr actions_before; /* action_warpper_t* */
- GListPtr actions_after; /* action_warpper_t* */
- };
-diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am
-index e84f4f7..1e50511 100644
---- a/lib/cib/Makefile.am
-+++ b/lib/cib/Makefile.am
-@@ -28,7 +28,7 @@ noinst_HEADERS =
- libcib_la_SOURCES = cib_ops.c cib_utils.c cib_client.c cib_native.c cib_attrs.c
- libcib_la_SOURCES += cib_file.c cib_remote.c
- 
--libcib_la_LDFLAGS = -version-info 4:1:0 -L$(top_builddir)/lib/pengine/.libs
-+libcib_la_LDFLAGS = -version-info 4:2:0 -L$(top_builddir)/lib/pengine/.libs
- libcib_la_LIBADD = $(CRYPTOLIB) $(top_builddir)/lib/pengine/libpe_rules.la $(top_builddir)/lib/common/libcrmcommon.la
- libcib_la_CFLAGS = -I$(top_srcdir)
- 
-diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am
-index 29413ba..29daeb2 100644
---- a/lib/cluster/Makefile.am
-+++ b/lib/cluster/Makefile.am
-@@ -28,7 +28,7 @@ header_HEADERS =
- lib_LTLIBRARIES = libcrmcluster.la
- 
- libcrmcluster_la_SOURCES = election.c cluster.c membership.c
--libcrmcluster_la_LDFLAGS = -version-info 4:2:0 $(CLUSTERLIBS)
-+libcrmcluster_la_LDFLAGS = -version-info 5:0:1 $(CLUSTERLIBS)
- libcrmcluster_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la
- libcrmcluster_la_DEPENDENCIES = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la
- 
-diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am
-index a593f40..f5c0766 100644
---- a/lib/common/Makefile.am
-+++ b/lib/common/Makefile.am
-@@ -37,7 +37,7 @@ if BUILD_CIBSECRETS
- libcrmcommon_la_SOURCES += cib_secrets.c
- endif
- 
--libcrmcommon_la_LDFLAGS = -version-info 7:0:4
-+libcrmcommon_la_LDFLAGS = -version-info 8:0:5
- libcrmcommon_la_LIBADD = @LIBADD_DL@ $(GNUTLSLIBS)
- libcrmcommon_la_SOURCES += $(top_builddir)/lib/gnu/md5.c
- 
-diff --git a/lib/fencing/Makefile.am b/lib/fencing/Makefile.am
-index 2bdcfeb..fbe02e4 100644
---- a/lib/fencing/Makefile.am
-+++ b/lib/fencing/Makefile.am
-@@ -25,7 +25,7 @@ AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
- lib_LTLIBRARIES = libstonithd.la
- 
- libstonithd_la_SOURCES = st_client.c
--libstonithd_la_LDFLAGS = -version-info 3:2:1
-+libstonithd_la_LDFLAGS = -version-info 3:3:1
- libstonithd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
- 
- AM_CFLAGS = $(AM_CPPFLAGS)
-diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am
-index f961ae1..820654c 100644
---- a/lib/lrmd/Makefile.am
-+++ b/lib/lrmd/Makefile.am
-@@ -25,7 +25,7 @@ AM_CPPFLAGS =
-I$(top_builddir)/include -I$(top_srcdir)/include \ - lib_LTLIBRARIES = liblrmd.la - - liblrmd_la_SOURCES = lrmd_client.c proxy_common.c --liblrmd_la_LDFLAGS = -version-info 3:0:2 -+liblrmd_la_LDFLAGS = -version-info 3:1:2 - liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \ - $(top_builddir)/lib/services/libcrmservice.la \ - $(top_builddir)/lib/fencing/libstonithd.la -diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am -index 78da075..60d1770 100644 ---- a/lib/pengine/Makefile.am -+++ b/lib/pengine/Makefile.am -@@ -26,11 +26,11 @@ lib_LTLIBRARIES = libpe_rules.la libpe_status.la - ## SOURCES - noinst_HEADERS = unpack.h variant.h - --libpe_rules_la_LDFLAGS = -version-info 2:4:0 -+libpe_rules_la_LDFLAGS = -version-info 2:5:0 - libpe_rules_la_SOURCES = rules.c common.c - libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la - --libpe_status_la_LDFLAGS = -version-info 8:0:4 -+libpe_status_la_LDFLAGS = -version-info 9:0:5 - libpe_status_la_SOURCES = status.c unpack.c utils.c complex.c native.c group.c clone.c rules.c common.c - libpe_status_la_LIBADD = @CURSESLIBS@ $(top_builddir)/lib/common/libcrmcommon.la - -diff --git a/lib/services/dbus.c b/lib/services/dbus.c -index 6341fc5..e2efecb 100644 ---- a/lib/services/dbus.c -+++ b/lib/services/dbus.c -@@ -64,11 +64,14 @@ pcmk_dbus_find_error(const char *method, DBusPendingCall* pending, DBusMessage * - } else { - DBusMessageIter args; - int dtype = dbus_message_get_type(reply); -+ char *sig; - - switch(dtype) { - case DBUS_MESSAGE_TYPE_METHOD_RETURN: - dbus_message_iter_init(reply, &args); -- crm_trace("Call to %s returned '%s'", method, dbus_message_iter_get_signature(&args)); -+ sig = dbus_message_iter_get_signature(&args); -+ crm_trace("Call to %s returned '%s'", method, sig); -+ dbus_free(sig); - break; - case DBUS_MESSAGE_TYPE_INVALID: - error.message = "Invalid reply"; -@@ -217,11 +220,14 @@ bool pcmk_dbus_type_check(DBusMessage *msg, DBusMessageIter *field, int expected - - if(dtype != expected) { - DBusMessageIter args; -+ char *sig; - - dbus_message_iter_init(msg, &args); -+ sig = dbus_message_iter_get_signature(&args); - do_crm_log_alias(LOG_ERR, __FILE__, function, line, -- "Unexepcted DBus type, expected %c in '%s' instead of %c", -- expected, dbus_message_iter_get_signature(&args), dtype); -+ "Unexpected DBus type, expected %c in '%s' instead of %c", -+ expected, sig, dtype); -+ dbus_free(sig); - return FALSE; - } - -diff --git a/lib/services/services.c b/lib/services/services.c -index 08bff88..7e2b9f7 100644 ---- a/lib/services/services.c -+++ b/lib/services/services.c -@@ -348,6 +348,34 @@ services_action_create_generic(const char *exec, const char *args[]) - return op; - } - -+#if SUPPORT_DBUS -+/* -+ * \internal -+ * \brief Update operation's pending DBus call, unreferencing old one if needed -+ * -+ * \param[in,out] op Operation to modify -+ * \param[in] pending Pending call to set -+ */ -+void -+services_set_op_pending(svc_action_t *op, DBusPendingCall *pending) -+{ -+ if (op->opaque->pending && (op->opaque->pending != pending)) { -+ if (pending) { -+ crm_info("Lost pending DBus call (%p)", op->opaque->pending); -+ } else { -+ crm_trace("Done with pending DBus call (%p)", op->opaque->pending); -+ } -+ dbus_pending_call_unref(op->opaque->pending); -+ } -+ op->opaque->pending = pending; -+ if (pending) { -+ crm_trace("Updated pending DBus call (%p)", pending); -+ } else { -+ crm_trace("Cleared pending DBus call"); -+ } -+} -+#endif -+ - void - services_action_cleanup(svc_action_t * op) - { 
-diff --git a/lib/services/services_private.h b/lib/services/services_private.h -index 183afb5..a98cd91 100644 ---- a/lib/services/services_private.h -+++ b/lib/services/services_private.h -@@ -63,4 +63,8 @@ void handle_blocked_ops(void); - - gboolean is_op_blocked(const char *rsc); - -+#if SUPPORT_DBUS -+void services_set_op_pending(svc_action_t *op, DBusPendingCall *pending); -+#endif -+ - #endif /* __MH_SERVICES_PRIVATE_H__ */ -diff --git a/lib/services/systemd.c b/lib/services/systemd.c -index 749d61c..e1e1bc9 100644 ---- a/lib/services/systemd.c -+++ b/lib/services/systemd.c -@@ -461,7 +461,12 @@ systemd_async_dispatch(DBusPendingCall *pending, void *user_data) - - if(op) { - crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action); -- op->opaque->pending = NULL; -+ if (pending == op->opaque->pending) { -+ op->opaque->pending = NULL; -+ } else { -+ crm_info("Received unexpected reply for pending DBus call (%p vs %p)", -+ op->opaque->pending, pending); -+ } - systemd_exec_result(reply, op); - - } else { -@@ -499,10 +504,7 @@ systemd_unit_check(const char *name, const char *state, void *userdata) - } - - if (op->synchronous == FALSE) { -- if (op->opaque->pending) { -- dbus_pending_call_unref(op->opaque->pending); -- } -- op->opaque->pending = NULL; -+ services_set_op_pending(op, NULL); - operation_finalize(op); - } - } -@@ -535,7 +537,7 @@ systemd_unit_exec_with_unit(svc_action_t * op, const char *unit) - return op->rc == PCMK_OCF_OK; - } else if (pending) { - dbus_pending_call_ref(pending); -- op->opaque->pending = pending; -+ services_set_op_pending(op, pending); - return TRUE; - } - -@@ -617,8 +619,7 @@ systemd_unit_exec_with_unit(svc_action_t * op, const char *unit) - - dbus_message_unref(msg); - if(pending) { -- dbus_pending_call_ref(pending); -- op->opaque->pending = pending; -+ services_set_op_pending(op, pending); - return TRUE; - } - return FALSE; -diff --git a/lib/transition/Makefile.am b/lib/transition/Makefile.am -index 8ce7775..04d18fe 100644 ---- a/lib/transition/Makefile.am -+++ b/lib/transition/Makefile.am -@@ -27,7 +27,7 @@ lib_LTLIBRARIES = libtransitioner.la - noinst_HEADERS = - libtransitioner_la_SOURCES = unpack.c graph.c utils.c - --libtransitioner_la_LDFLAGS = -version-info 2:3:0 -+libtransitioner_la_LDFLAGS = -version-info 2:4:0 - libtransitioner_la_CFLAGS = -I$(top_builddir) - libtransitioner_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la - -diff --git a/pengine/Makefile.am b/pengine/Makefile.am -index 31532cf..0e12a1f 100644 ---- a/pengine/Makefile.am -+++ b/pengine/Makefile.am -@@ -61,7 +61,7 @@ endif - noinst_HEADERS = allocate.h utils.h pengine.h - #utils.h pengine.h - --libpengine_la_LDFLAGS = -version-info 8:0:4 -+libpengine_la_LDFLAGS = -version-info 9:0:5 - # -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version - libpengine_la_SOURCES = pengine.c allocate.c utils.c constraints.c - libpengine_la_SOURCES += native.c group.c clone.c master.c graph.c utilization.c -diff --git a/pengine/allocate.c b/pengine/allocate.c -index 68cafd4..ec5a18d 100644 ---- a/pengine/allocate.c -+++ b/pengine/allocate.c -@@ -1962,7 +1962,6 @@ expand_node_list(GListPtr list) - if(node_list) { - existing_len = strlen(node_list); - } -- - crm_trace("Adding %s (%dc) at offset %d", node->details->uname, len - 2, existing_len); - node_list = realloc_safe(node_list, len + existing_len); - sprintf(node_list + existing_len, "%s%s", existing_len == 0 ? 
"":" ", node->details->uname); -diff --git a/pengine/allocate.h b/pengine/allocate.h -index f6602c6..73f750e 100644 ---- a/pengine/allocate.h -+++ b/pengine/allocate.h -@@ -171,5 +171,6 @@ extern enum pe_graph_flags clone_update_actions(action_t * first, action_t * the - enum pe_action_flags filter, enum pe_ordering type); - - gboolean update_action_flags(action_t * action, enum pe_action_flags flags); -+gboolean update_action(action_t * action); - - #endif -diff --git a/pengine/clone.c b/pengine/clone.c -index 3840a0a..ebf53ed 100644 ---- a/pengine/clone.c -+++ b/pengine/clone.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - - #define VARIANT_CLONE 1 - #include -@@ -1338,6 +1339,8 @@ clone_update_actions(action_t * first, action_t * then, node_t * node, enum pe_a - changed |= native_update_actions(first, then, node, flags, filter, type); - - for (; gIter != NULL; gIter = gIter->next) { -+ enum pe_graph_flags child_changed = pe_graph_none; -+ GListPtr lpc = NULL; - resource_t *child = (resource_t *) gIter->data; - action_t *child_action = find_first_action(child->actions, NULL, then->task, node); - -@@ -1345,9 +1348,17 @@ clone_update_actions(action_t * first, action_t * then, node_t * node, enum pe_a - enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node); - - if (is_set(child_flags, pe_action_runnable)) { -- changed |= -+ -+ child_changed |= - child->cmds->update_actions(first, child_action, node, flags, filter, type); - } -+ changed |= child_changed; -+ if (child_changed & pe_graph_updated_then) { -+ for (lpc = child_action->actions_after; lpc != NULL; lpc = lpc->next) { -+ action_wrapper_t *other = (action_wrapper_t *) lpc->data; -+ update_action(other->action); -+ } -+ } - } - } - } -diff --git a/pengine/constraints.c b/pengine/constraints.c -index 1f44811..7527aa6 100644 ---- a/pengine/constraints.c -+++ b/pengine/constraints.c -@@ -256,7 +256,7 @@ unpack_simple_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) - resource_t *rsc_then = NULL; - resource_t *rsc_first = NULL; - gboolean invert_bool = TRUE; -- gboolean require_all = TRUE; -+ int min_required_before = 0; - enum pe_order_kind kind = pe_order_kind_mandatory; - enum pe_ordering cons_weight = pe_order_optional; - -@@ -351,7 +351,15 @@ unpack_simple_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) - && crm_is_true(require_all_s) == FALSE - && rsc_first->variant >= pe_clone) { - -- require_all = FALSE; -+ /* require-all=false means only one instance of the clone is required */ -+ min_required_before = 1; -+ } else if (rsc_first->variant >= pe_clone) { -+ const char *min_clones_s = g_hash_table_lookup(rsc_first->meta, XML_RSC_ATTR_INCARNATION_MIN); -+ if (min_clones_s) { -+ /* if clone min is set, we require at a minimum X number of instances -+ * to be runnable before allowing dependencies to be runnable. */ -+ min_required_before = crm_parse_int(min_clones_s, "0"); -+ } - } - - cons_weight = pe_order_optional; -@@ -368,22 +376,31 @@ unpack_simple_rsc_order(xmlNode * xml_obj, pe_working_set_t * data_set) - cons_weight |= get_flags(id, kind, action_first, action_then, FALSE); - } - -- if (require_all == FALSE) { -+ /* If there is a minimum number of instances that must be runnable before -+ * the 'then' action is runnable, we use a pseudo action as an intermediate step -+ * start min number of clones -> pseudo action is runnable -> dependency runnable. 
*/ -+ if (min_required_before) { - GListPtr rIter = NULL; - char *task = crm_concat(CRM_OP_RELAXED_CLONE, id, ':'); - action_t *unordered_action = get_pseudo_op(task, data_set); - free(task); - -+ /* require the pseudo action to have "min_required_before" number of -+ * actions to be considered runnable before allowing the pseudo action -+ * to be runnable. */ -+ unordered_action->required_runnable_before = min_required_before; - update_action_flags(unordered_action, pe_action_requires_any); - - for (rIter = rsc_first->children; id && rIter; rIter = rIter->next) { - resource_t *child = rIter->data; -- -+ /* order each clone instance before the pseudo action */ - custom_action_order(child, generate_op_key(child->id, action_first, 0), NULL, - NULL, NULL, unordered_action, - pe_order_one_or_more | pe_order_implies_then_printed, data_set); - } - -+ /* order the "then" dependency to occur after the pseudo action only if -+ * the pseudo action is runnable */ - order_id = custom_action_order(NULL, NULL, unordered_action, - rsc_then, generate_op_key(rsc_then->id, action_then, 0), NULL, - cons_weight | pe_order_runnable_left, data_set); -diff --git a/pengine/graph.c b/pengine/graph.c -index 9cfede6..3d832f0 100644 ---- a/pengine/graph.c -+++ b/pengine/graph.c -@@ -29,7 +29,6 @@ - #include - #include - #include - --gboolean update_action(action_t * action); - void update_colo_start_chain(action_t * action); - gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type); - -@@ -261,8 +260,16 @@ graph_update_action(action_t * first, action_t * then, node_t * node, enum pe_ac - pe_action_runnable, pe_order_one_or_more); - - } else if (is_set(flags, pe_action_runnable)) { -- if (update_action_flags(then, pe_action_runnable)) { -- changed |= pe_graph_updated_then; -+ /* a "first" action is considered runnable, so increment -+ * the 'runnable_before' counter */ -+ then->runnable_before++; -+ -+ /* if the runnable-before count for then meets or exceeds the required -+ * number of "before" runnable actions, mark then as runnable */ -+ if (then->runnable_before >= then->required_runnable_before) { -+ if (update_action_flags(then, pe_action_runnable)) { -+ changed |= pe_graph_updated_then; -+ } - } - } - if (changed) { -@@ -456,6 +463,18 @@ update_action(action_t * then) - pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->uname : ""); - - if (is_set(then->flags, pe_action_requires_any)) { -+ /* initialize the count of currently known runnable before actions to 0; -+ * from here, as graph_update_action is called for each of -+ * then's before actions, this number will increment as -+ * runnable 'first' actions are encountered */ -+ then->runnable_before = 0; -+ -+ /* for backwards compatibility with previous options that use -+ * the 'requires_any' flag, initialize required to 1 if it is -+ * not set.
*/ -+ if (then->required_runnable_before == 0) { -+ then->required_runnable_before = 1; -+ } - clear_bit(then->flags, pe_action_runnable); - /* We are relying on the pe_order_one_or_more clause of - * graph_update_action(), called as part of the: -diff --git a/pengine/native.c b/pengine/native.c -index b93f8da..7d5f602 100644 ---- a/pengine/native.c -+++ b/pengine/native.c -@@ -2817,8 +2817,7 @@ native_create_probe(resource_t * rsc, node_t * node, action_t * complete, - } - - static void --native_start_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_stonith, -- pe_working_set_t * data_set) -+native_start_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) - { - node_t *target = stonith_op ? stonith_op->node : NULL; - -@@ -2893,14 +2892,24 @@ find_fence_target_node_actions(GListPtr search_list, const char *key, node_t *fe - } - - static void --native_stop_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_stonith, -- pe_working_set_t * data_set) -+native_stop_constraints(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) - { - char *key = NULL; - GListPtr gIter = NULL; - GListPtr action_list = NULL; -+ -+ action_t *start = NULL; - resource_t *top = uber_parent(rsc); - -+ key = start_key(rsc); -+ action_list = find_actions(rsc->actions, key, NULL); -+ if(action_list) { -+ start = action_list->data; -+ } -+ -+ g_list_free(action_list); -+ free(key); -+ - key = stop_key(rsc); - action_list = find_fence_target_node_actions(rsc->actions, key, stonith_op->node, data_set); - free(key); -@@ -2932,7 +2941,7 @@ native_stop_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_sto - update_action_flags(action, pe_action_runnable); - update_action_flags(action, pe_action_implied_by_stonith); - -- { -+ if(start == NULL || start->needs > rsc_req_quorum) { - enum pe_ordering flags = pe_order_optional; - action_t *parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL); - -@@ -3032,7 +3041,8 @@ native_stop_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_sto - crm_trace("here - 1"); - update_action_flags(action, pe_action_pseudo); - update_action_flags(action, pe_action_runnable); -- if (is_stonith == FALSE) { -+ -+ if (start == NULL || start->needs > rsc_req_quorum) { - order_actions(stonith_op, action, pe_order_preserve|pe_order_optional); - } - } -@@ -3044,8 +3054,6 @@ native_stop_constraints(resource_t * rsc, action_t * stonith_op, gboolean is_sto - void - rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * data_set) - { -- gboolean is_stonith = FALSE; -- - if (rsc->children) { - GListPtr gIter = NULL; - -@@ -3063,11 +3071,11 @@ rsc_stonith_ordering(resource_t * rsc, action_t * stonith_op, pe_working_set_t * - } - - /* Start constraints */ -- native_start_constraints(rsc, stonith_op, is_stonith, data_set); -+ native_start_constraints(rsc, stonith_op, data_set); - - /* Stop constraints */ - if (stonith_op) { -- native_stop_constraints(rsc, stonith_op, is_stonith, data_set); -+ native_stop_constraints(rsc, stonith_op, data_set); - } - } - -diff --git a/pengine/regression.sh b/pengine/regression.sh -index d184798..7f73f92 100755 ---- a/pengine/regression.sh -+++ b/pengine/regression.sh -@@ -31,6 +31,20 @@ info Performing the following tests from $io_dir - create_mode="false" - - echo "" -+do_test cloned_start_one "order first clone then clone... first clone_min=2" -+do_test cloned_start_two "order first clone then clone... 
first clone_min=2" -+do_test cloned_stop_one "order first clone then clone... first clone_min=2" -+do_test cloned_stop_two "order first clone then clone... first clone_min=2" -+do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true" -+do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true" -+do_test clone_min_interleave_stop_one "order first clone then clone... first clone_min=2 and then has interleave=true" -+do_test clone_min_interleave_stop_two "order first clone then clone... first clone_min=2 and then has interleave=true" -+do_test clone_min_start_one "order first clone then primitive... first clone_min=2" -+do_test clone_min_start_two "order first clone then primitive... first clone_min=2" -+do_test clone_min_stop_all "order first clone then primitive... first clone_min=2" -+do_test clone_min_stop_one "order first clone then primitive... first clone_min=2" -+do_test clone_min_stop_two "order first clone then primitive... first clone_min=2" -+ - do_test simple1 "Offline " - do_test simple2 "Start " - do_test simple3 "Start 2 " -diff --git a/pengine/test10/bug-5186-partial-migrate.dot b/pengine/test10/bug-5186-partial-migrate.dot -index 033d41d..65f5616 100644 ---- a/pengine/test10/bug-5186-partial-migrate.dot -+++ b/pengine/test10/bug-5186-partial-migrate.dot -@@ -66,13 +66,10 @@ - "stonith 'reboot' bl460g1n7" -> "clnDiskd1_stop_0" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "clnDiskd2_stop_0" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "clnPing_stop_0" [ style = bold] --"stonith 'reboot' bl460g1n7" -> "grpStonith8_stop_0" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "prmDiskd1_stop_0 bl460g1n7" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "prmDiskd2_stop_0 bl460g1n7" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "prmDummy_stop_0 bl460g1n7" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "prmPing_stop_0 bl460g1n7" [ style = bold] --"stonith 'reboot' bl460g1n7" -> "prmStonith8-1_stop_0 bl460g1n7" [ style = bold] --"stonith 'reboot' bl460g1n7" -> "prmStonith8-2_stop_0 bl460g1n7" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "prmVM2_stop_0 bl460g1n7" [ style = bold] - "stonith 'reboot' bl460g1n7" -> "stonith_complete" [ style = bold] - "stonith 'reboot' bl460g1n7" [ style=bold color="green" fontcolor="black"] -diff --git a/pengine/test10/bug-5186-partial-migrate.exp b/pengine/test10/bug-5186-partial-migrate.exp -index 216d962..bc058ea 100644 ---- a/pengine/test10/bug-5186-partial-migrate.exp -+++ b/pengine/test10/bug-5186-partial-migrate.exp -@@ -104,11 +104,7 @@ - - - -- -- -- -- -- -+ - - - -@@ -182,9 +178,6 @@ - - - -- -- -- - - - -@@ -229,9 +222,6 @@ - - - -- -- -- -
- - -diff --git a/pengine/test10/bug-5186-partial-migrate.summary b/pengine/test10/bug-5186-partial-migrate.summary -index f848c97..5e62a23 100644 ---- a/pengine/test10/bug-5186-partial-migrate.summary -+++ b/pengine/test10/bug-5186-partial-migrate.summary -@@ -35,18 +35,22 @@ Transition Summary: - - Executing cluster transition: - * Resource action: prmVM2 stop on bl460g1n6 -+ * Pseudo action: grpStonith8_stop_0 -+ * Pseudo action: prmStonith8-2_stop_0 - * Fencing bl460g1n7 (reboot) - * Pseudo action: stonith_complete - * Pseudo action: prmDummy_stop_0 - * Pseudo action: prmVM2_stop_0 -- * Pseudo action: grpStonith8_stop_0 -- * Pseudo action: prmStonith8-2_stop_0 -+ * Pseudo action: prmStonith8-1_stop_0 - * Pseudo action: clnDiskd1_stop_0 - * Pseudo action: clnDiskd2_stop_0 - * Pseudo action: clnPing_stop_0 - * Resource action: prmDummy start on bl460g1n6 - * Resource action: prmVM2 start on bl460g1n8 -- * Pseudo action: prmStonith8-1_stop_0 -+ * Pseudo action: grpStonith8_stopped_0 -+ * Pseudo action: grpStonith8_start_0 -+ * Resource action: prmStonith8-1 start on bl460g1n6 -+ * Resource action: prmStonith8-2 start on bl460g1n6 - * Pseudo action: prmDiskd1_stop_0 - * Pseudo action: clnDiskd1_stopped_0 - * Pseudo action: prmDiskd2_stop_0 -@@ -55,10 +59,6 @@ Executing cluster transition: - * Pseudo action: clnPing_stopped_0 - * Pseudo action: all_stopped - * Resource action: prmVM2 monitor=10000 on bl460g1n8 -- * Pseudo action: grpStonith8_stopped_0 -- * Pseudo action: grpStonith8_start_0 -- * Resource action: prmStonith8-1 start on bl460g1n6 -- * Resource action: prmStonith8-2 start on bl460g1n6 - * Pseudo action: grpStonith8_running_0 - * Resource action: prmStonith8-1 monitor=10000 on bl460g1n6 - * Resource action: prmStonith8-2 monitor=3600000 on bl460g1n6 -diff --git a/pengine/test10/bug-lf-2551.dot b/pengine/test10/bug-lf-2551.dot -index ed80e15..18bca44 100644 ---- a/pengine/test10/bug-lf-2551.dot -+++ b/pengine/test10/bug-lf-2551.dot -@@ -56,7 +56,6 @@ digraph "g" { - "stonith 'reboot' hex-9" -> "cmirrord:3_stop_0 hex-9" [ style = bold] - "stonith 'reboot' hex-9" -> "dlm:3_stop_0 hex-9" [ style = bold] - "stonith 'reboot' hex-9" -> "dummy1_stop_0 hex-9" [ style = bold] --"stonith 'reboot' hex-9" -> "fencing-sbd_stop_0 hex-9" [ style = bold] - "stonith 'reboot' hex-9" -> "o2cb:3_stop_0 hex-9" [ style = bold] - "stonith 'reboot' hex-9" -> "ocfs2-1:3_stop_0 hex-9" [ style = bold] - "stonith 'reboot' hex-9" -> "stonith_complete" [ style = bold] -diff --git a/pengine/test10/bug-lf-2551.exp b/pengine/test10/bug-lf-2551.exp -index 0af9010..d6266e1 100644 ---- a/pengine/test10/bug-lf-2551.exp -+++ b/pengine/test10/bug-lf-2551.exp -@@ -18,11 +18,7 @@ - - - -- -- -- -- -- -+ - - - -diff --git a/pengine/test10/bug-lf-2551.summary b/pengine/test10/bug-lf-2551.summary -index f8d861c..158eb73 100644 ---- a/pengine/test10/bug-lf-2551.summary -+++ b/pengine/test10/bug-lf-2551.summary -@@ -107,6 +107,7 @@ Transition Summary: - * Stop vm-61 (hex-9) - - Executing cluster transition: -+ * Pseudo action: fencing-sbd_stop_0 - * Resource action: dummy1 monitor=300000 on hex-8 - * Resource action: dummy1 monitor=300000 on hex-7 - * Fencing hex-9 (reboot) -@@ -114,7 +115,7 @@ Executing cluster transition: - * Pseudo action: load_stopped_hex-8 - * Pseudo action: load_stopped_hex-7 - * Pseudo action: load_stopped_hex-0 -- * Pseudo action: fencing-sbd_stop_0 -+ * Resource action: fencing-sbd start on hex-0 - * Pseudo action: dummy1_stop_0 - * Pseudo action: vm-03_stop_0 - * Pseudo action: vm-06_stop_0 -@@ 
-133,7 +134,6 @@ Executing cluster transition: - * Pseudo action: vm-57_stop_0 - * Pseudo action: vm-61_stop_0 - * Pseudo action: load_stopped_hex-9 -- * Resource action: fencing-sbd start on hex-0 - * Resource action: dummy1 start on hex-0 - * Pseudo action: base-clone_stop_0 - * Resource action: dummy1 monitor=30000 on hex-0 -diff --git a/pengine/test10/clone_min_interleave_start_one.dot b/pengine/test10/clone_min_interleave_start_one.dot -new file mode 100644 -index 0000000..15ac9be ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_one.dot -@@ -0,0 +1,50 @@ -+ digraph "g" { -+"FAKE1-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_start_0" -> "FAKE1-clone_running_0" [ style = bold] -+"FAKE1-clone_start_0" -> "FAKE1_start_0 c7auto1" [ style = bold] -+"FAKE1-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE1_start_0 c7auto1" -> "FAKE1-clone_running_0" [ style = bold] -+"FAKE1_start_0 c7auto1" -> "FAKE1_monitor_10000 c7auto1" [ style = bold] -+"FAKE1_start_0 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE2-clone_running_0" -> "FAKE3-clone_start_0" [ style = dashed] -+"FAKE2-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE2-clone_start_0" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2-clone_start_0" -> "FAKE2:1_start_0 c7auto3" [ style = dashed] -+"FAKE2-clone_start_0" -> "FAKE2:2_start_0 c7auto1" [ style = dashed] -+"FAKE2-clone_start_0" -> "FAKE2_start_0 c7auto2" [ style = dashed] -+"FAKE2-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE2:1_monitor_10000 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE2:1_start_0 c7auto3" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2:1_start_0 c7auto3" -> "FAKE2:1_monitor_10000 c7auto3" [ style = dashed] -+"FAKE2:1_start_0 c7auto3" -> "FAKE3:1_start_0 c7auto3" [ style = dashed] -+"FAKE2:1_start_0 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE2:2_monitor_10000 c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE2:2_start_0 c7auto1" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2:2_start_0 c7auto1" -> "FAKE2:2_monitor_10000 c7auto1" [ style = dashed] -+"FAKE2:2_start_0 c7auto1" -> "FAKE3:2_start_0 c7auto1" [ style = dashed] -+"FAKE2:2_start_0 c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_monitor_10000 c7auto2" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_start_0 c7auto2" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2_start_0 c7auto2" -> "FAKE2_monitor_10000 c7auto2" [ style = dashed] -+"FAKE2_start_0 c7auto2" -> "FAKE3_start_0 c7auto2" [ style = dashed] -+"FAKE2_start_0 c7auto2" [ style=dashed color="red" fontcolor="black"] -+"FAKE3-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE3-clone_start_0" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3-clone_start_0" -> "FAKE3:1_start_0 c7auto3" [ style = dashed] -+"FAKE3-clone_start_0" -> "FAKE3:2_start_0 c7auto1" [ style = dashed] -+"FAKE3-clone_start_0" -> "FAKE3_start_0 c7auto2" [ style = dashed] -+"FAKE3-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE3:1_monitor_10000 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE3:1_start_0 c7auto3" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3:1_start_0 c7auto3" -> "FAKE3:1_monitor_10000 c7auto3" [ style = dashed] -+"FAKE3:1_start_0 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE3:2_monitor_10000 
c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE3:2_start_0 c7auto1" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3:2_start_0 c7auto1" -> "FAKE3:2_monitor_10000 c7auto1" [ style = dashed] -+"FAKE3:2_start_0 c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_monitor_10000 c7auto2" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_start_0 c7auto2" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3_start_0 c7auto2" -> "FAKE3_monitor_10000 c7auto2" [ style = dashed] -+"FAKE3_start_0 c7auto2" [ style=dashed color="red" fontcolor="black"] -+} -diff --git a/pengine/test10/clone_min_interleave_start_one.exp b/pengine/test10/clone_min_interleave_start_one.exp -new file mode 100644 -index 0000000..b6e0c5d ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_one.exp -@@ -0,0 +1,51 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_interleave_start_one.scores b/pengine/test10/clone_min_interleave_start_one.scores -new file mode 100644 -index 0000000..03de018 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_one.scores -@@ -0,0 +1,67 @@ -+Allocation scores: -+clone_color: FAKE1-clone allocation score on c7auto1: 0 -+clone_color: FAKE1-clone allocation score on c7auto2: -INFINITY -+clone_color: FAKE1-clone allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:0 allocation score on c7auto1: 0 -+clone_color: FAKE1:0 allocation score on c7auto2: -INFINITY -+clone_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:1 allocation score on c7auto1: 0 -+clone_color: FAKE1:1 allocation score on c7auto2: -INFINITY -+clone_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:2 allocation score on c7auto1: 0 -+clone_color: FAKE1:2 allocation score on c7auto2: -INFINITY -+clone_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+clone_color: FAKE2-clone allocation score on c7auto1: 0 -+clone_color: FAKE2-clone allocation score on c7auto2: 0 -+clone_color: FAKE2-clone allocation score on c7auto3: 0 -+clone_color: FAKE2:0 allocation score on c7auto1: 0 -+clone_color: FAKE2:0 allocation score on c7auto2: 0 -+clone_color: FAKE2:0 allocation score on c7auto3: 0 -+clone_color: FAKE2:1 allocation score on c7auto1: 0 -+clone_color: FAKE2:1 allocation score on c7auto2: 0 -+clone_color: FAKE2:1 allocation score on c7auto3: 0 -+clone_color: FAKE2:2 allocation score on c7auto1: 0 -+clone_color: FAKE2:2 allocation score on c7auto2: 0 -+clone_color: FAKE2:2 allocation score on c7auto3: 0 -+clone_color: FAKE3-clone allocation score on c7auto1: 0 -+clone_color: FAKE3-clone allocation score on c7auto2: 0 -+clone_color: FAKE3-clone allocation score on c7auto3: 0 -+clone_color: FAKE3:0 allocation score on c7auto1: 0 -+clone_color: FAKE3:0 allocation score on c7auto2: 0 -+clone_color: FAKE3:0 allocation score on c7auto3: 0 -+clone_color: FAKE3:1 allocation score on c7auto1: 0 -+clone_color: FAKE3:1 allocation score on c7auto2: 0 -+clone_color: FAKE3:1 allocation score on c7auto3: 0 -+clone_color: FAKE3:2 allocation score on c7auto1: 0 -+clone_color: FAKE3:2 allocation score on c7auto2: 0 -+clone_color: FAKE3:2 allocation score on c7auto3: 0 -+native_color: FAKE1:0 allocation score on c7auto1: 0 -+native_color: FAKE1:0 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto1: 
-INFINITY -+native_color: FAKE1:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto1: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+native_color: FAKE2:0 allocation score on c7auto1: 0 -+native_color: FAKE2:0 allocation score on c7auto2: 0 -+native_color: FAKE2:0 allocation score on c7auto3: 0 -+native_color: FAKE2:1 allocation score on c7auto1: 0 -+native_color: FAKE2:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE2:1 allocation score on c7auto3: 0 -+native_color: FAKE2:2 allocation score on c7auto1: 0 -+native_color: FAKE2:2 allocation score on c7auto2: -INFINITY -+native_color: FAKE2:2 allocation score on c7auto3: -INFINITY -+native_color: FAKE3:0 allocation score on c7auto1: 0 -+native_color: FAKE3:0 allocation score on c7auto2: 0 -+native_color: FAKE3:0 allocation score on c7auto3: 0 -+native_color: FAKE3:1 allocation score on c7auto1: 0 -+native_color: FAKE3:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE3:1 allocation score on c7auto3: 0 -+native_color: FAKE3:2 allocation score on c7auto1: 0 -+native_color: FAKE3:2 allocation score on c7auto2: -INFINITY -+native_color: FAKE3:2 allocation score on c7auto3: -INFINITY -+native_color: shooter allocation score on c7auto1: 0 -+native_color: shooter allocation score on c7auto2: 0 -+native_color: shooter allocation score on c7auto3: 0 -diff --git a/pengine/test10/clone_min_interleave_start_one.summary b/pengine/test10/clone_min_interleave_start_one.summary -new file mode 100644 -index 0000000..b15f68a ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_one.summary -@@ -0,0 +1,39 @@ -+ -+Current cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ -+Transition Summary: -+ * Start FAKE1:0 (c7auto1) -+ * Start FAKE2:0 (c7auto2 - blocked) -+ * Start FAKE2:1 (c7auto3 - blocked) -+ * Start FAKE2:2 (c7auto1 - blocked) -+ * Start FAKE3:0 (c7auto2 - blocked) -+ * Start FAKE3:1 (c7auto3 - blocked) -+ * Start FAKE3:2 (c7auto1 - blocked) -+ -+Executing cluster transition: -+ * Pseudo action: FAKE1-clone_start_0 -+ * Resource action: FAKE1 start on c7auto1 -+ * Pseudo action: FAKE1-clone_running_0 -+ * Resource action: FAKE1 monitor=10000 on c7auto1 -+ -+Revised cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Started: [ c7auto1 ] -+ Stopped: [ c7auto2 c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ -diff --git a/pengine/test10/clone_min_interleave_start_one.xml b/pengine/test10/clone_min_interleave_start_one.xml -new file mode 100644 -index 0000000..fbe99de ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_one.xml -@@ -0,0 +1,155 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_interleave_start_two.dot b/pengine/test10/clone_min_interleave_start_two.dot -new file mode 100644 -index 0000000..f99ce32 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_two.dot -@@ -0,0 +1,59 @@ -+ digraph "g" { -+"FAKE1-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_start_0" -> "FAKE1-clone_running_0" [ style = bold] -+"FAKE1-clone_start_0" -> "FAKE1:1_start_0 c7auto1" [ style = bold] -+"FAKE1-clone_start_0" -> "FAKE1_start_0 c7auto2" [ style = bold] -+"FAKE1-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1:1_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE1:1_start_0 c7auto1" -> "FAKE1-clone_running_0" [ style = bold] -+"FAKE1:1_start_0 c7auto1" -> "FAKE1:1_monitor_10000 c7auto1" [ style = bold] -+"FAKE1:1_start_0 c7auto1" -> "clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory" [ style = bold] -+"FAKE1:1_start_0 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE1_monitor_10000 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE1_start_0 c7auto2" -> "FAKE1-clone_running_0" [ style = bold] -+"FAKE1_start_0 c7auto2" -> "FAKE1_monitor_10000 c7auto2" [ style = bold] -+"FAKE1_start_0 c7auto2" -> "clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory" [ style = bold] -+"FAKE1_start_0 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE2-clone_running_0" -> "FAKE3-clone_start_0" [ style = bold] -+"FAKE2-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE2-clone_start_0" -> "FAKE2-clone_running_0" [ style = bold] -+"FAKE2-clone_start_0" -> "FAKE2:1_start_0 c7auto2" [ style = bold] -+"FAKE2-clone_start_0" -> "FAKE2:2_start_0 c7auto1" [ style = bold] -+"FAKE2-clone_start_0" -> "FAKE2_start_0 c7auto3" [ style = bold] -+"FAKE2-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE2:1_monitor_10000 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE2:1_start_0 c7auto2" -> "FAKE2-clone_running_0" [ style = bold] -+"FAKE2:1_start_0 c7auto2" -> "FAKE2:1_monitor_10000 c7auto2" [ style = bold] -+"FAKE2:1_start_0 c7auto2" -> "FAKE3:1_start_0 c7auto2" [ style = bold] -+"FAKE2:1_start_0 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE2:2_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE2:2_start_0 c7auto1" -> "FAKE2-clone_running_0" [ style = bold] -+"FAKE2:2_start_0 c7auto1" -> "FAKE2:2_monitor_10000 c7auto1" [ style = bold] -+"FAKE2:2_start_0 c7auto1" -> "FAKE3:2_start_0 c7auto1" [ style = bold] -+"FAKE2:2_start_0 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE2_monitor_10000 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKE2_start_0 c7auto3" -> "FAKE2-clone_running_0" [ style = bold] -+"FAKE2_start_0 c7auto3" -> "FAKE2_monitor_10000 c7auto3" [ style = bold] -+"FAKE2_start_0 c7auto3" -> "FAKE3_start_0 c7auto3" [ style = bold] -+"FAKE2_start_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKE3-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE3-clone_start_0" -> "FAKE3-clone_running_0" [ style = bold] -+"FAKE3-clone_start_0" -> "FAKE3:1_start_0 c7auto2" [ style = bold] -+"FAKE3-clone_start_0" -> "FAKE3:2_start_0 c7auto1" [ style = bold] -+"FAKE3-clone_start_0" -> "FAKE3_start_0 c7auto3" [ style = bold] 
-+"FAKE3-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE3:1_monitor_10000 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE3:1_start_0 c7auto2" -> "FAKE3-clone_running_0" [ style = bold] -+"FAKE3:1_start_0 c7auto2" -> "FAKE3:1_monitor_10000 c7auto2" [ style = bold] -+"FAKE3:1_start_0 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE3:2_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE3:2_start_0 c7auto1" -> "FAKE3-clone_running_0" [ style = bold] -+"FAKE3:2_start_0 c7auto1" -> "FAKE3:2_monitor_10000 c7auto1" [ style = bold] -+"FAKE3:2_start_0 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE3_monitor_10000 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKE3_start_0 c7auto3" -> "FAKE3-clone_running_0" [ style = bold] -+"FAKE3_start_0 c7auto3" -> "FAKE3_monitor_10000 c7auto3" [ style = bold] -+"FAKE3_start_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory" -> "FAKE2-clone_start_0" [ style = bold] -+"clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory" [ style=bold color="green" fontcolor="orange"] -+} -diff --git a/pengine/test10/clone_min_interleave_start_two.exp b/pengine/test10/clone_min_interleave_start_two.exp -new file mode 100644 -index 0000000..9846072 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_two.exp -@@ -0,0 +1,326 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_interleave_start_two.scores b/pengine/test10/clone_min_interleave_start_two.scores -new file mode 100644 -index 0000000..d443c58 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_two.scores -@@ -0,0 +1,67 @@ -+Allocation scores: -+clone_color: FAKE1-clone allocation score on c7auto1: 0 -+clone_color: FAKE1-clone allocation score on c7auto2: 0 -+clone_color: FAKE1-clone allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:0 allocation score on c7auto1: 0 -+clone_color: FAKE1:0 allocation score on c7auto2: 0 -+clone_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:1 allocation score on c7auto1: 0 -+clone_color: FAKE1:1 allocation score on c7auto2: 0 -+clone_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:2 allocation score on c7auto1: 0 -+clone_color: FAKE1:2 allocation score on c7auto2: 0 -+clone_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+clone_color: FAKE2-clone allocation score on c7auto1: 0 -+clone_color: FAKE2-clone allocation score on c7auto2: 0 -+clone_color: FAKE2-clone allocation score 
on c7auto3: 0 -+clone_color: FAKE2:0 allocation score on c7auto1: 0 -+clone_color: FAKE2:0 allocation score on c7auto2: 0 -+clone_color: FAKE2:0 allocation score on c7auto3: 0 -+clone_color: FAKE2:1 allocation score on c7auto1: 0 -+clone_color: FAKE2:1 allocation score on c7auto2: 0 -+clone_color: FAKE2:1 allocation score on c7auto3: 0 -+clone_color: FAKE2:2 allocation score on c7auto1: 0 -+clone_color: FAKE2:2 allocation score on c7auto2: 0 -+clone_color: FAKE2:2 allocation score on c7auto3: 0 -+clone_color: FAKE3-clone allocation score on c7auto1: 0 -+clone_color: FAKE3-clone allocation score on c7auto2: 0 -+clone_color: FAKE3-clone allocation score on c7auto3: 0 -+clone_color: FAKE3:0 allocation score on c7auto1: 0 -+clone_color: FAKE3:0 allocation score on c7auto2: 0 -+clone_color: FAKE3:0 allocation score on c7auto3: 0 -+clone_color: FAKE3:1 allocation score on c7auto1: 0 -+clone_color: FAKE3:1 allocation score on c7auto2: 0 -+clone_color: FAKE3:1 allocation score on c7auto3: 0 -+clone_color: FAKE3:2 allocation score on c7auto1: 0 -+clone_color: FAKE3:2 allocation score on c7auto2: 0 -+clone_color: FAKE3:2 allocation score on c7auto3: 0 -+native_color: FAKE1:0 allocation score on c7auto1: 0 -+native_color: FAKE1:0 allocation score on c7auto2: 0 -+native_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto1: 0 -+native_color: FAKE1:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto1: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+native_color: FAKE2:0 allocation score on c7auto1: 0 -+native_color: FAKE2:0 allocation score on c7auto2: 0 -+native_color: FAKE2:0 allocation score on c7auto3: 0 -+native_color: FAKE2:1 allocation score on c7auto1: 0 -+native_color: FAKE2:1 allocation score on c7auto2: 0 -+native_color: FAKE2:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE2:2 allocation score on c7auto1: 0 -+native_color: FAKE2:2 allocation score on c7auto2: -INFINITY -+native_color: FAKE2:2 allocation score on c7auto3: -INFINITY -+native_color: FAKE3:0 allocation score on c7auto1: 0 -+native_color: FAKE3:0 allocation score on c7auto2: 0 -+native_color: FAKE3:0 allocation score on c7auto3: 0 -+native_color: FAKE3:1 allocation score on c7auto1: 0 -+native_color: FAKE3:1 allocation score on c7auto2: 0 -+native_color: FAKE3:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE3:2 allocation score on c7auto1: 0 -+native_color: FAKE3:2 allocation score on c7auto2: -INFINITY -+native_color: FAKE3:2 allocation score on c7auto3: -INFINITY -+native_color: shooter allocation score on c7auto1: 0 -+native_color: shooter allocation score on c7auto2: 0 -+native_color: shooter allocation score on c7auto3: 0 -diff --git a/pengine/test10/clone_min_interleave_start_two.summary b/pengine/test10/clone_min_interleave_start_two.summary -new file mode 100644 -index 0000000..9f928f2 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_two.summary -@@ -0,0 +1,59 @@ -+ -+Current cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ -+Transition Summary: -+ * Start 
FAKE1:0 (c7auto2) -+ * Start FAKE1:1 (c7auto1) -+ * Start FAKE2:0 (c7auto3) -+ * Start FAKE2:1 (c7auto2) -+ * Start FAKE2:2 (c7auto1) -+ * Start FAKE3:0 (c7auto3) -+ * Start FAKE3:1 (c7auto2) -+ * Start FAKE3:2 (c7auto1) -+ -+Executing cluster transition: -+ * Pseudo action: FAKE1-clone_start_0 -+ * Resource action: FAKE1 start on c7auto2 -+ * Resource action: FAKE1 start on c7auto1 -+ * Pseudo action: FAKE1-clone_running_0 -+ * Pseudo action: clone-one-or-more:order-FAKE1-clone-FAKE2-clone-mandatory -+ * Resource action: FAKE1 monitor=10000 on c7auto2 -+ * Resource action: FAKE1 monitor=10000 on c7auto1 -+ * Pseudo action: FAKE2-clone_start_0 -+ * Resource action: FAKE2 start on c7auto3 -+ * Resource action: FAKE2 start on c7auto2 -+ * Resource action: FAKE2 start on c7auto1 -+ * Pseudo action: FAKE2-clone_running_0 -+ * Pseudo action: FAKE3-clone_start_0 -+ * Resource action: FAKE2 monitor=10000 on c7auto3 -+ * Resource action: FAKE2 monitor=10000 on c7auto2 -+ * Resource action: FAKE2 monitor=10000 on c7auto1 -+ * Resource action: FAKE3 start on c7auto3 -+ * Resource action: FAKE3 start on c7auto2 -+ * Resource action: FAKE3 start on c7auto1 -+ * Pseudo action: FAKE3-clone_running_0 -+ * Resource action: FAKE3 monitor=10000 on c7auto3 -+ * Resource action: FAKE3 monitor=10000 on c7auto2 -+ * Resource action: FAKE3 monitor=10000 on c7auto1 -+ -+Revised cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Started: [ c7auto1 c7auto2 ] -+ Stopped: [ c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ -diff --git a/pengine/test10/clone_min_interleave_start_two.xml b/pengine/test10/clone_min_interleave_start_two.xml -new file mode 100644 -index 0000000..2507018 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_start_two.xml -@@ -0,0 +1,154 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_interleave_stop_one.dot b/pengine/test10/clone_min_interleave_stop_one.dot -new file mode 100644 -index 0000000..a66ceb6 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_one.dot -@@ -0,0 +1,18 @@ -+ digraph "g" { -+"FAKE1-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_start_0" -> "FAKE1-clone_running_0" [ style = bold] -+"FAKE1-clone_start_0" -> "FAKE1_start_0 " [ style = dashed] -+"FAKE1-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_stop_0" -> "FAKE1-clone_stopped_0" [ style = bold] -+"FAKE1-clone_stop_0" -> "FAKE1_stop_0 c7auto3" [ style = bold] -+"FAKE1-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_stopped_0" -> "FAKE1-clone_start_0" [ style = bold] -+"FAKE1-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1_start_0 " -> "FAKE1-clone_running_0" [ style = dashed] -+"FAKE1_start_0 " [ style=dashed color="red" fontcolor="black"] -+"FAKE1_stop_0 c7auto3" -> "FAKE1-clone_stopped_0" [ style = bold] -+"FAKE1_stop_0 c7auto3" -> 
"FAKE1_start_0 " [ style = dashed] -+"FAKE1_stop_0 c7auto3" -> "all_stopped" [ style = bold] -+"FAKE1_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"all_stopped" [ style=bold color="green" fontcolor="orange"] -+} -diff --git a/pengine/test10/clone_min_interleave_stop_one.exp b/pengine/test10/clone_min_interleave_stop_one.exp -new file mode 100644 -index 0000000..31a15da ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_one.exp -@@ -0,0 +1,74 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_interleave_stop_one.scores b/pengine/test10/clone_min_interleave_stop_one.scores -new file mode 100644 -index 0000000..1a98230 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_one.scores -@@ -0,0 +1,67 @@ -+Allocation scores: -+clone_color: FAKE1-clone allocation score on c7auto1: 0 -+clone_color: FAKE1-clone allocation score on c7auto2: 0 -+clone_color: FAKE1-clone allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:0 allocation score on c7auto1: 0 -+clone_color: FAKE1:0 allocation score on c7auto2: 0 -+clone_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:1 allocation score on c7auto1: 1 -+clone_color: FAKE1:1 allocation score on c7auto2: 0 -+clone_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:2 allocation score on c7auto1: 0 -+clone_color: FAKE1:2 allocation score on c7auto2: 1 -+clone_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+clone_color: FAKE2-clone allocation score on c7auto1: 0 -+clone_color: FAKE2-clone allocation score on c7auto2: 0 -+clone_color: FAKE2-clone allocation score on c7auto3: 0 -+clone_color: FAKE2:0 allocation score on c7auto1: 0 -+clone_color: FAKE2:0 allocation score on c7auto2: 0 -+clone_color: FAKE2:0 allocation score on c7auto3: 1 -+clone_color: FAKE2:1 allocation score on c7auto1: 1 -+clone_color: FAKE2:1 allocation score on c7auto2: 0 -+clone_color: FAKE2:1 allocation score on c7auto3: 0 -+clone_color: FAKE2:2 allocation score on c7auto1: 0 -+clone_color: FAKE2:2 allocation score on c7auto2: 1 -+clone_color: FAKE2:2 allocation score on c7auto3: 0 -+clone_color: FAKE3-clone allocation score on c7auto1: 0 -+clone_color: FAKE3-clone allocation score on c7auto2: 0 -+clone_color: FAKE3-clone allocation score on c7auto3: 0 -+clone_color: FAKE3:0 allocation score on c7auto1: 0 -+clone_color: FAKE3:0 allocation score on c7auto2: 0 -+clone_color: FAKE3:0 allocation score on c7auto3: 1 -+clone_color: FAKE3:1 allocation score on c7auto1: 1 -+clone_color: FAKE3:1 allocation score on c7auto2: 0 -+clone_color: FAKE3:1 allocation score on c7auto3: 0 -+clone_color: FAKE3:2 allocation score on c7auto1: 0 -+clone_color: FAKE3:2 allocation score on c7auto2: 1 -+clone_color: FAKE3:2 allocation score on c7auto3: 0 -+native_color: FAKE1:0 allocation score on c7auto1: -INFINITY -+native_color: FAKE1:0 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto1: 1 -+native_color: FAKE1:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto1: 0 -+native_color: FAKE1:2 allocation score on c7auto2: 1 -+native_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+native_color: 
FAKE2:0 allocation score on c7auto1: 0 -+native_color: FAKE2:0 allocation score on c7auto2: 0 -+native_color: FAKE2:0 allocation score on c7auto3: 1 -+native_color: FAKE2:1 allocation score on c7auto1: 1 -+native_color: FAKE2:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE2:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE2:2 allocation score on c7auto1: 0 -+native_color: FAKE2:2 allocation score on c7auto2: 1 -+native_color: FAKE2:2 allocation score on c7auto3: -INFINITY -+native_color: FAKE3:0 allocation score on c7auto1: 0 -+native_color: FAKE3:0 allocation score on c7auto2: 0 -+native_color: FAKE3:0 allocation score on c7auto3: 1 -+native_color: FAKE3:1 allocation score on c7auto1: 1 -+native_color: FAKE3:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE3:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE3:2 allocation score on c7auto1: 0 -+native_color: FAKE3:2 allocation score on c7auto2: 1 -+native_color: FAKE3:2 allocation score on c7auto3: -INFINITY -+native_color: shooter allocation score on c7auto1: 0 -+native_color: shooter allocation score on c7auto2: 0 -+native_color: shooter allocation score on c7auto3: 0 -diff --git a/pengine/test10/clone_min_interleave_stop_one.summary b/pengine/test10/clone_min_interleave_stop_one.summary -new file mode 100644 -index 0000000..9280b7e ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_one.summary -@@ -0,0 +1,35 @@ -+ -+Current cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ -+Transition Summary: -+ * Stop FAKE1:0 (c7auto3) -+ -+Executing cluster transition: -+ * Pseudo action: FAKE1-clone_stop_0 -+ * Resource action: FAKE1 stop on c7auto3 -+ * Pseudo action: FAKE1-clone_stopped_0 -+ * Pseudo action: FAKE1-clone_start_0 -+ * Pseudo action: all_stopped -+ * Pseudo action: FAKE1-clone_running_0 -+ -+Revised cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Started: [ c7auto1 c7auto2 ] -+ Stopped: [ c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ -diff --git a/pengine/test10/clone_min_interleave_stop_one.xml b/pengine/test10/clone_min_interleave_stop_one.xml -new file mode 100644 -index 0000000..31db5f3 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_one.xml -@@ -0,0 +1,153 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_interleave_stop_two.dot b/pengine/test10/clone_min_interleave_stop_two.dot -new file mode 100644 -index 0000000..73f60dd ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_two.dot -@@ -0,0 +1,108 @@ -+ digraph "g" { -+"FAKE1-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_start_0" 
-> "FAKE1-clone_running_0" [ style = bold] -+"FAKE1-clone_start_0" -> "FAKE1_start_0 " [ style = dashed] -+"FAKE1-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_stop_0" -> "FAKE1-clone_stopped_0" [ style = bold] -+"FAKE1-clone_stop_0" -> "FAKE1_stop_0 c7auto2" [ style = bold] -+"FAKE1-clone_stop_0" -> "FAKE1_stop_0 c7auto3" [ style = bold] -+"FAKE1-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1-clone_stopped_0" -> "FAKE1-clone_start_0" [ style = bold] -+"FAKE1-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE1_start_0 " -> "FAKE1-clone_running_0" [ style = dashed] -+"FAKE1_start_0 " [ style=dashed color="red" fontcolor="black"] -+"FAKE1_stop_0 c7auto2" -> "FAKE1-clone_stopped_0" [ style = bold] -+"FAKE1_stop_0 c7auto2" -> "FAKE1_start_0 " [ style = dashed] -+"FAKE1_stop_0 c7auto2" -> "all_stopped" [ style = bold] -+"FAKE1_stop_0 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE1_stop_0 c7auto3" -> "FAKE1-clone_stopped_0" [ style = bold] -+"FAKE1_stop_0 c7auto3" -> "FAKE1_start_0 " [ style = dashed] -+"FAKE1_stop_0 c7auto3" -> "all_stopped" [ style = bold] -+"FAKE1_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKE2-clone_running_0" -> "FAKE3-clone_start_0" [ style = dashed] -+"FAKE2-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE2-clone_start_0" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2-clone_start_0" -> "FAKE2_start_0 c7auto1" [ style = dashed] -+"FAKE2-clone_start_0" -> "FAKE2_start_0 c7auto2" [ style = dashed] -+"FAKE2-clone_start_0" -> "FAKE2_start_0 c7auto3" [ style = dashed] -+"FAKE2-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE2-clone_stop_0" -> "FAKE2-clone_stopped_0" [ style = bold] -+"FAKE2-clone_stop_0" -> "FAKE2_stop_0 c7auto1" [ style = bold] -+"FAKE2-clone_stop_0" -> "FAKE2_stop_0 c7auto2" [ style = bold] -+"FAKE2-clone_stop_0" -> "FAKE2_stop_0 c7auto3" [ style = bold] -+"FAKE2-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE2-clone_stopped_0" -> "FAKE1-clone_stop_0" [ style = bold] -+"FAKE2-clone_stopped_0" -> "FAKE2-clone_start_0" [ style = dashed] -+"FAKE2-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE2_monitor_10000 c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_monitor_10000 c7auto2" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_monitor_10000 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_start_0 c7auto1" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2_start_0 c7auto1" -> "FAKE2_monitor_10000 c7auto1" [ style = dashed] -+"FAKE2_start_0 c7auto1" -> "FAKE3_start_0 c7auto1" [ style = dashed] -+"FAKE2_start_0 c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_start_0 c7auto2" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2_start_0 c7auto2" -> "FAKE2_monitor_10000 c7auto2" [ style = dashed] -+"FAKE2_start_0 c7auto2" -> "FAKE3_start_0 c7auto2" [ style = dashed] -+"FAKE2_start_0 c7auto2" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_start_0 c7auto3" -> "FAKE2-clone_running_0" [ style = dashed] -+"FAKE2_start_0 c7auto3" -> "FAKE2_monitor_10000 c7auto3" [ style = dashed] -+"FAKE2_start_0 c7auto3" -> "FAKE3_start_0 c7auto3" [ style = dashed] -+"FAKE2_start_0 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE2_stop_0 c7auto1" -> "FAKE2-clone_stopped_0" [ style = bold] -+"FAKE2_stop_0 c7auto1" -> "FAKE2_start_0 c7auto1" [ style = dashed] -+"FAKE2_stop_0 c7auto1" -> "all_stopped" [ style 
= bold] -+"FAKE2_stop_0 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE2_stop_0 c7auto2" -> "FAKE1_stop_0 c7auto2" [ style = bold] -+"FAKE2_stop_0 c7auto2" -> "FAKE2-clone_stopped_0" [ style = bold] -+"FAKE2_stop_0 c7auto2" -> "FAKE2_start_0 c7auto2" [ style = dashed] -+"FAKE2_stop_0 c7auto2" -> "all_stopped" [ style = bold] -+"FAKE2_stop_0 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE2_stop_0 c7auto3" -> "FAKE1_stop_0 c7auto3" [ style = bold] -+"FAKE2_stop_0 c7auto3" -> "FAKE2-clone_stopped_0" [ style = bold] -+"FAKE2_stop_0 c7auto3" -> "FAKE2_start_0 c7auto3" [ style = dashed] -+"FAKE2_stop_0 c7auto3" -> "all_stopped" [ style = bold] -+"FAKE2_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKE3-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE3-clone_start_0" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3-clone_start_0" -> "FAKE3_start_0 c7auto1" [ style = dashed] -+"FAKE3-clone_start_0" -> "FAKE3_start_0 c7auto2" [ style = dashed] -+"FAKE3-clone_start_0" -> "FAKE3_start_0 c7auto3" [ style = dashed] -+"FAKE3-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKE3-clone_stop_0" -> "FAKE3-clone_stopped_0" [ style = bold] -+"FAKE3-clone_stop_0" -> "FAKE3_stop_0 c7auto1" [ style = bold] -+"FAKE3-clone_stop_0" -> "FAKE3_stop_0 c7auto2" [ style = bold] -+"FAKE3-clone_stop_0" -> "FAKE3_stop_0 c7auto3" [ style = bold] -+"FAKE3-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE3-clone_stopped_0" -> "FAKE2-clone_stop_0" [ style = bold] -+"FAKE3-clone_stopped_0" -> "FAKE3-clone_start_0" [ style = dashed] -+"FAKE3-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"FAKE3_monitor_10000 c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_monitor_10000 c7auto2" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_monitor_10000 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_start_0 c7auto1" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3_start_0 c7auto1" -> "FAKE3_monitor_10000 c7auto1" [ style = dashed] -+"FAKE3_start_0 c7auto1" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_start_0 c7auto2" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3_start_0 c7auto2" -> "FAKE3_monitor_10000 c7auto2" [ style = dashed] -+"FAKE3_start_0 c7auto2" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_start_0 c7auto3" -> "FAKE3-clone_running_0" [ style = dashed] -+"FAKE3_start_0 c7auto3" -> "FAKE3_monitor_10000 c7auto3" [ style = dashed] -+"FAKE3_start_0 c7auto3" [ style=dashed color="red" fontcolor="black"] -+"FAKE3_stop_0 c7auto1" -> "FAKE2_stop_0 c7auto1" [ style = bold] -+"FAKE3_stop_0 c7auto1" -> "FAKE3-clone_stopped_0" [ style = bold] -+"FAKE3_stop_0 c7auto1" -> "FAKE3_start_0 c7auto1" [ style = dashed] -+"FAKE3_stop_0 c7auto1" -> "all_stopped" [ style = bold] -+"FAKE3_stop_0 c7auto1" [ style=bold color="green" fontcolor="black"] -+"FAKE3_stop_0 c7auto2" -> "FAKE2_stop_0 c7auto2" [ style = bold] -+"FAKE3_stop_0 c7auto2" -> "FAKE3-clone_stopped_0" [ style = bold] -+"FAKE3_stop_0 c7auto2" -> "FAKE3_start_0 c7auto2" [ style = dashed] -+"FAKE3_stop_0 c7auto2" -> "all_stopped" [ style = bold] -+"FAKE3_stop_0 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKE3_stop_0 c7auto3" -> "FAKE2_stop_0 c7auto3" [ style = bold] -+"FAKE3_stop_0 c7auto3" -> "FAKE3-clone_stopped_0" [ style = bold] -+"FAKE3_stop_0 c7auto3" -> "FAKE3_start_0 c7auto3" [ style = dashed] -+"FAKE3_stop_0 c7auto3" -> "all_stopped" [ style = bold] -+"FAKE3_stop_0 
c7auto3" [ style=bold color="green" fontcolor="black"] -+"all_stopped" [ style=bold color="green" fontcolor="orange"] -+} -diff --git a/pengine/test10/clone_min_interleave_stop_two.exp b/pengine/test10/clone_min_interleave_stop_two.exp -new file mode 100644 -index 0000000..62fe1e6 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_two.exp -@@ -0,0 +1,270 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_interleave_stop_two.scores b/pengine/test10/clone_min_interleave_stop_two.scores -new file mode 100644 -index 0000000..ee7df92 ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_two.scores -@@ -0,0 +1,67 @@ -+Allocation scores: -+clone_color: FAKE1-clone allocation score on c7auto1: 0 -+clone_color: FAKE1-clone allocation score on c7auto2: -INFINITY -+clone_color: FAKE1-clone allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:0 allocation score on c7auto1: 0 -+clone_color: FAKE1:0 allocation score on c7auto2: -INFINITY -+clone_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:1 allocation score on c7auto1: 1 -+clone_color: FAKE1:1 allocation score on c7auto2: -INFINITY -+clone_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+clone_color: FAKE1:2 allocation score on c7auto1: 0 -+clone_color: FAKE1:2 allocation score on c7auto2: -INFINITY -+clone_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+clone_color: FAKE2-clone allocation score on c7auto1: 0 -+clone_color: FAKE2-clone allocation score on c7auto2: 0 -+clone_color: FAKE2-clone allocation score on c7auto3: 0 -+clone_color: FAKE2:0 allocation score on c7auto1: 0 -+clone_color: FAKE2:0 allocation score on c7auto2: 0 -+clone_color: FAKE2:0 allocation score on c7auto3: 1 -+clone_color: FAKE2:1 allocation score on c7auto1: 1 -+clone_color: FAKE2:1 allocation score on c7auto2: 0 -+clone_color: FAKE2:1 allocation score on c7auto3: 0 -+clone_color: FAKE2:2 allocation score on c7auto1: 0 -+clone_color: FAKE2:2 allocation score on c7auto2: 1 -+clone_color: FAKE2:2 allocation score on c7auto3: 0 -+clone_color: FAKE3-clone allocation score on c7auto1: 0 -+clone_color: FAKE3-clone allocation score on c7auto2: 0 -+clone_color: FAKE3-clone allocation score on c7auto3: 0 -+clone_color: FAKE3:0 allocation score on c7auto1: 0 -+clone_color: FAKE3:0 allocation score on c7auto2: 0 -+clone_color: FAKE3:0 allocation score on c7auto3: 1 -+clone_color: FAKE3:1 allocation score on c7auto1: 1 -+clone_color: FAKE3:1 allocation score on c7auto2: 0 -+clone_color: FAKE3:1 allocation score on c7auto3: 0 -+clone_color: FAKE3:2 allocation score on c7auto1: 0 -+clone_color: FAKE3:2 allocation score on c7auto2: 1 -+clone_color: FAKE3:2 allocation score on c7auto3: 0 -+native_color: FAKE1:0 allocation score on 
c7auto1: -INFINITY -+native_color: FAKE1:0 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:0 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto1: 1 -+native_color: FAKE1:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto1: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto2: -INFINITY -+native_color: FAKE1:2 allocation score on c7auto3: -INFINITY -+native_color: FAKE2:0 allocation score on c7auto1: 0 -+native_color: FAKE2:0 allocation score on c7auto2: -INFINITY -+native_color: FAKE2:0 allocation score on c7auto3: 1 -+native_color: FAKE2:1 allocation score on c7auto1: 1 -+native_color: FAKE2:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE2:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE2:2 allocation score on c7auto1: 0 -+native_color: FAKE2:2 allocation score on c7auto2: 1 -+native_color: FAKE2:2 allocation score on c7auto3: 0 -+native_color: FAKE3:0 allocation score on c7auto1: 0 -+native_color: FAKE3:0 allocation score on c7auto2: -INFINITY -+native_color: FAKE3:0 allocation score on c7auto3: 1 -+native_color: FAKE3:1 allocation score on c7auto1: 1 -+native_color: FAKE3:1 allocation score on c7auto2: -INFINITY -+native_color: FAKE3:1 allocation score on c7auto3: -INFINITY -+native_color: FAKE3:2 allocation score on c7auto1: 0 -+native_color: FAKE3:2 allocation score on c7auto2: 1 -+native_color: FAKE3:2 allocation score on c7auto3: 0 -+native_color: shooter allocation score on c7auto1: 0 -+native_color: shooter allocation score on c7auto2: 0 -+native_color: shooter allocation score on c7auto3: 0 -diff --git a/pengine/test10/clone_min_interleave_stop_two.summary b/pengine/test10/clone_min_interleave_stop_two.summary -new file mode 100644 -index 0000000..fb28e0d ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_two.summary -@@ -0,0 +1,53 @@ -+ -+Current cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ -+Transition Summary: -+ * Stop FAKE1:0 (c7auto3) -+ * Stop FAKE1:2 (c7auto2) -+ * Stop FAKE2:0 (Started c7auto3) -+ * Stop FAKE2:1 (Started c7auto1) -+ * Stop FAKE2:2 (Started c7auto2) -+ * Stop FAKE3:0 (Started c7auto3) -+ * Stop FAKE3:1 (Started c7auto1) -+ * Stop FAKE3:2 (Started c7auto2) -+ -+Executing cluster transition: -+ * Pseudo action: FAKE3-clone_stop_0 -+ * Resource action: FAKE3 stop on c7auto3 -+ * Resource action: FAKE3 stop on c7auto1 -+ * Resource action: FAKE3 stop on c7auto2 -+ * Pseudo action: FAKE3-clone_stopped_0 -+ * Pseudo action: FAKE2-clone_stop_0 -+ * Resource action: FAKE2 stop on c7auto3 -+ * Resource action: FAKE2 stop on c7auto1 -+ * Resource action: FAKE2 stop on c7auto2 -+ * Pseudo action: FAKE2-clone_stopped_0 -+ * Pseudo action: FAKE1-clone_stop_0 -+ * Resource action: FAKE1 stop on c7auto3 -+ * Resource action: FAKE1 stop on c7auto2 -+ * Pseudo action: FAKE1-clone_stopped_0 -+ * Pseudo action: FAKE1-clone_start_0 -+ * Pseudo action: all_stopped -+ * Pseudo action: FAKE1-clone_running_0 -+ -+Revised cluster status: -+Online: [ c7auto1 c7auto2 c7auto3 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKE1-clone [FAKE1] -+ Started: [ c7auto1 ] -+ 
Stopped: [ c7auto2 c7auto3 ] -+ Clone Set: FAKE2-clone [FAKE2] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ Clone Set: FAKE3-clone [FAKE3] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ -diff --git a/pengine/test10/clone_min_interleave_stop_two.xml b/pengine/test10/clone_min_interleave_stop_two.xml -new file mode 100644 -index 0000000..32c2b3b ---- /dev/null -+++ b/pengine/test10/clone_min_interleave_stop_two.xml -@@ -0,0 +1,154 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_start_one.dot b/pengine/test10/clone_min_start_one.dot -new file mode 100644 -index 0000000..3940361 ---- /dev/null -+++ b/pengine/test10/clone_min_start_one.dot -@@ -0,0 +1,20 @@ -+ digraph "g" { -+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold] -+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 c7auto3" [ style = bold] -+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE_monitor_10000 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKECLONE_start_0 c7auto3" -> "FAKECLONE-clone_running_0" [ style = bold] -+"FAKECLONE_start_0 c7auto3" -> "FAKECLONE_monitor_10000 c7auto3" [ style = bold] -+"FAKECLONE_start_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKE_monitor_10000 c7auto4" [ style=dashed color="red" fontcolor="black"] -+"FAKE_start_0 c7auto4" -> "FAKE_monitor_10000 c7auto4" [ style = dashed] -+"FAKE_start_0 c7auto4" [ style=dashed color="red" fontcolor="black"] -+"all_stopped" [ style=bold color="green" fontcolor="orange"] -+"shooter_monitor_60000 c7auto3" [ style=bold color="green" fontcolor="black"] -+"shooter_start_0 c7auto3" -> "shooter_monitor_60000 c7auto3" [ style = bold] -+"shooter_start_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"shooter_stop_0 c7auto1" -> "all_stopped" [ style = bold] -+"shooter_stop_0 c7auto1" -> "shooter_start_0 c7auto3" [ style = bold] -+"shooter_stop_0 c7auto1" [ style=bold color="green" fontcolor="black"] -+} -diff --git a/pengine/test10/clone_min_start_one.exp b/pengine/test10/clone_min_start_one.exp -new file mode 100644 -index 0000000..a6868f6 ---- /dev/null -+++ b/pengine/test10/clone_min_start_one.exp -@@ -0,0 +1,98 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/clone_min_start_one.scores b/pengine/test10/clone_min_start_one.scores -new file mode 100644 -index 0000000..668689e ---- /dev/null -+++ b/pengine/test10/clone_min_start_one.scores -@@ -0,0 +1,45 @@ -+Allocation scores: -+clone_color: FAKECLONE-clone allocation score on c7auto1: 0 -+clone_color: FAKECLONE-clone allocation score on c7auto2: 0 -+clone_color: FAKECLONE-clone allocation score on c7auto3: 0 -+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY -+clone_color: FAKECLONE:0 allocation score on c7auto1: 0 
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKE allocation score on c7auto1: -INFINITY
-+native_color: FAKE allocation score on c7auto2: -INFINITY
-+native_color: FAKE allocation score on c7auto3: -INFINITY
-+native_color: FAKE allocation score on c7auto4: 0
-+native_color: FAKECLONE:0 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto3: 0
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: shooter allocation score on c7auto1: 0
-+native_color: shooter allocation score on c7auto2: 0
-+native_color: shooter allocation score on c7auto3: 0
-+native_color: shooter allocation score on c7auto4: 0
-diff --git a/pengine/test10/clone_min_start_one.summary b/pengine/test10/clone_min_start_one.summary
-new file mode 100644
-index 0000000..ee33e01
---- /dev/null
-+++ b/pengine/test10/clone_min_start_one.summary
-@@ -0,0 +1,37 @@
-+
-+Current cluster status:
-+Node c7auto1 (1): standby
-+Node c7auto2 (2): standby
-+Online: [ c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Stopped
-+
-+Transition Summary:
-+ * Move shooter (Started c7auto1 -> c7auto3)
-+ * Start FAKECLONE:0 (c7auto3)
-+ * Start FAKE (c7auto4 - blocked)
-+
-+Executing cluster transition:
-+ * Resource action: shooter stop on c7auto1
-+ * Pseudo action: FAKECLONE-clone_start_0
-+ * Pseudo action: all_stopped
-+ * Resource action: shooter start on c7auto3
-+ * Resource action: FAKECLONE start on c7auto3
-+ * Pseudo action: FAKECLONE-clone_running_0
-+ * Resource action: shooter monitor=60000 on c7auto3
-+ * Resource action: FAKECLONE monitor=10000 on c7auto3
-+
-+Revised cluster status:
-+Node c7auto1 (1): standby
-+Node c7auto2 (2): standby
-+Online: [ c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto3
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto3 ]
-+ Stopped: [ c7auto1 c7auto2 c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Stopped
-+
-diff --git a/pengine/test10/clone_min_start_one.xml b/pengine/test10/clone_min_start_one.xml
-new file mode 100644
-index 0000000..dfb9379
---- /dev/null
-+++ b/pengine/test10/clone_min_start_one.xml
-@@ -0,0 +1,155 @@
-+ [~155 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_start_two.dot b/pengine/test10/clone_min_start_two.dot
-new file mode 100644
-index 0000000..3fe0062
---- /dev/null
-+++ b/pengine/test10/clone_min_start_two.dot
-@@ -0,0 +1,22 @@
-+ digraph "g" {
-+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE:1_start_0 c7auto1" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 c7auto3" [ style = bold]
-+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE:1_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE:1_start_0 c7auto1" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE:1_start_0 c7auto1" -> "FAKECLONE:1_monitor_10000 c7auto1" [ style = bold]
-+"FAKECLONE:1_start_0 c7auto1" -> "clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory" [ style = bold]
-+"FAKECLONE:1_start_0 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_monitor_10000 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_start_0 c7auto3" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE_start_0 c7auto3" -> "FAKECLONE_monitor_10000 c7auto3" [ style = bold]
-+"FAKECLONE_start_0 c7auto3" -> "clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory" [ style = bold]
-+"FAKECLONE_start_0 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"FAKE_monitor_10000 c7auto4" [ style=bold color="green" fontcolor="black"]
-+"FAKE_start_0 c7auto4" -> "FAKE_monitor_10000 c7auto4" [ style = bold]
-+"FAKE_start_0 c7auto4" [ style=bold color="green" fontcolor="black"]
-+"clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory" -> "FAKE_start_0 c7auto4" [ style = bold]
-+"clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory" [ style=bold color="green" fontcolor="orange"]
-+}
-diff --git a/pengine/test10/clone_min_start_two.exp b/pengine/test10/clone_min_start_two.exp
-new file mode 100644
-index 0000000..f7a053c
---- /dev/null
-+++ b/pengine/test10/clone_min_start_two.exp
-@@ -0,0 +1,121 @@
-+ [~121 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_start_two.scores b/pengine/test10/clone_min_start_two.scores
-new file mode 100644
-index 0000000..b3bcac0
---- /dev/null
-+++ b/pengine/test10/clone_min_start_two.scores
-@@ -0,0 +1,45 @@
-+Allocation scores:
-+clone_color: FAKECLONE-clone allocation score on c7auto1: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto2: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:0 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKE allocation score on c7auto1: -INFINITY
-+native_color: FAKE allocation score on c7auto2: -INFINITY
-+native_color: FAKE allocation score on c7auto3: -INFINITY
-+native_color: FAKE allocation score on c7auto4: 0
-+native_color: FAKECLONE:0 allocation score on c7auto1: 0
-+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto3: 0
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: 0
-+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: shooter allocation score on c7auto1: 0
-+native_color: shooter allocation score on c7auto2: 0
-+native_color: shooter allocation score on c7auto3: 0
-+native_color: shooter allocation score on c7auto4: 0
-diff --git a/pengine/test10/clone_min_start_two.summary b/pengine/test10/clone_min_start_two.summary
-new file mode 100644
-index 0000000..f0c649c
---- /dev/null
-+++ b/pengine/test10/clone_min_start_two.summary
-@@ -0,0 +1,36 @@
-+
-+Current cluster status:
-+Node c7auto2 (2): standby
-+Online: [ c7auto1 c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Stopped
-+
-+Transition Summary:
-+ * Start FAKECLONE:0 (c7auto3)
-+ * Start FAKECLONE:1 (c7auto1)
-+ * Start FAKE (c7auto4)
-+
-+Executing cluster transition:
-+ * Pseudo action: FAKECLONE-clone_start_0
-+ * Resource action: FAKECLONE start on c7auto3
-+ * Resource action: FAKECLONE start on c7auto1
-+ * Pseudo action: FAKECLONE-clone_running_0
-+ * Pseudo action: clone-one-or-more:order-FAKECLONE-clone-FAKE-mandatory
-+ * Resource action: FAKECLONE monitor=10000 on c7auto3
-+ * Resource action: FAKECLONE monitor=10000 on c7auto1
-+ * Resource action: FAKE start on c7auto4
-+ * Resource action: FAKE monitor=10000 on c7auto4
-+
-+Revised cluster status:
-+Node c7auto2 (2): standby
-+Online: [ c7auto1 c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto1 c7auto3 ]
-+ Stopped: [ c7auto2 c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Started c7auto4
-+
-diff --git a/pengine/test10/clone_min_start_two.xml b/pengine/test10/clone_min_start_two.xml
-new file mode 100644
-index 0000000..ae84425
---- /dev/null
-+++ b/pengine/test10/clone_min_start_two.xml
-@@ -0,0 +1,153 @@
-+ [~153 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_stop_all.dot b/pengine/test10/clone_min_stop_all.dot
-new file mode 100644
-index 0000000..254e889
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_all.dot
-@@ -0,0 +1,41 @@
-+ digraph "g" {
-+"FAKECLONE-clone_running_0" [ style=dashed color="red" fontcolor="orange"]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = dashed]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE-clone_start_0" [ style=dashed color="red" fontcolor="orange"]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto1" [ style = bold]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto2" [ style = bold]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto3" [ style = bold]
-+"FAKECLONE-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_stopped_0" -> "FAKECLONE-clone_start_0" [ style = dashed]
-+"FAKECLONE-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE_start_0 " -> "FAKECLONE-clone_running_0" [ style = dashed]
-+"FAKECLONE_start_0 " [ style=dashed color="red" fontcolor="black"]
-+"FAKECLONE_stop_0 c7auto1" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE_stop_0 c7auto1" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE_stop_0 c7auto1" -> "all_stopped" [ style = bold]
-+"FAKECLONE_stop_0 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE_stop_0 c7auto2" -> "all_stopped" [ style = bold]
-+"FAKECLONE_stop_0 c7auto2" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_stop_0 c7auto3" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE_stop_0 c7auto3" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE_stop_0 c7auto3" -> "all_stopped" [ style = bold]
-+"FAKECLONE_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"FAKE_monitor_10000 c7auto4" [ style=dashed color="red" fontcolor="black"]
-+"FAKE_start_0 c7auto4" -> "FAKE_monitor_10000 c7auto4" [ style = dashed]
-+"FAKE_start_0 c7auto4" [ style=dashed color="red" fontcolor="black"]
-+"FAKE_stop_0 c7auto4" -> "FAKECLONE-clone_stop_0" [ style = bold]
-+"FAKE_stop_0 c7auto4" -> "FAKE_start_0 c7auto4" [ style = dashed]
-+"FAKE_stop_0 c7auto4" -> "all_stopped" [ style = bold]
-+"FAKE_stop_0 c7auto4" [ style=bold color="green" fontcolor="black"]
-+"all_stopped" [ style=bold color="green" fontcolor="orange"]
-+"shooter_monitor_60000 c7auto4" [ style=bold color="green" fontcolor="black"]
-+"shooter_start_0 c7auto4" -> "shooter_monitor_60000 c7auto4" [ style = bold]
-+"shooter_start_0 c7auto4" [ style=bold color="green" fontcolor="black"]
-+"shooter_stop_0 c7auto1" -> "all_stopped" [ style = bold]
-+"shooter_stop_0 c7auto1" -> "shooter_start_0 c7auto4" [ style = bold]
-+"shooter_stop_0 c7auto1" [ style=bold color="green" fontcolor="black"]
-+}
-diff --git a/pengine/test10/clone_min_stop_all.exp b/pengine/test10/clone_min_stop_all.exp
-new file mode 100644
-index 0000000..1b8c9ce
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_all.exp
-@@ -0,0 +1,142 @@
-+ [~142 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_stop_all.scores b/pengine/test10/clone_min_stop_all.scores
-new file mode 100644
-index 0000000..0bcbb1f
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_all.scores
-@@ -0,0 +1,45 @@
-+Allocation scores:
-+clone_color: FAKECLONE-clone allocation score on c7auto1: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto2: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:0 allocation score on c7auto1: 1
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 1
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 1
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKE allocation score on c7auto1: -INFINITY
-+native_color: FAKE allocation score on c7auto2: -INFINITY
-+native_color: FAKE allocation score on c7auto3: -INFINITY
-+native_color: FAKE allocation score on c7auto4: 0
-+native_color: FAKECLONE:0 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: shooter allocation score on c7auto1: 0
-+native_color: shooter allocation score on c7auto2: 0
-+native_color: shooter allocation score on c7auto3: 0
-+native_color: shooter allocation score on c7auto4: 0
-diff --git a/pengine/test10/clone_min_stop_all.summary b/pengine/test10/clone_min_stop_all.summary
-new file mode 100644
-index 0000000..eb2944f
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_all.summary
-@@ -0,0 +1,43 @@
-+
-+Current cluster status:
-+Node c7auto1 (1): standby
-+Node c7auto2 (2): standby
-+Node c7auto3 (3): standby
-+Online: [ c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto1 c7auto2 c7auto3 ]
-+ Stopped: [ c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Started c7auto4
-+
-+Transition Summary:
-+ * Move shooter (Started c7auto1 -> c7auto4)
-+ * Stop FAKECLONE:0 (c7auto1)
-+ * Stop FAKECLONE:1 (c7auto2)
-+ * Stop FAKECLONE:2 (c7auto3)
-+ * Stop FAKE (Started c7auto4)
-+
-+Executing cluster transition:
-+ * Resource action: shooter stop on c7auto1
-+ * Resource action: FAKE stop on c7auto4
-+ * Resource action: shooter start on c7auto4
-+ * Pseudo action: FAKECLONE-clone_stop_0
-+ * Resource action: shooter monitor=60000 on c7auto4
-+ * Resource action: FAKECLONE stop on c7auto1
-+ * Resource action: FAKECLONE stop on c7auto2
-+ * Resource action: FAKECLONE stop on c7auto3
-+ * Pseudo action: FAKECLONE-clone_stopped_0
-+ * Pseudo action: all_stopped
-+
-+Revised cluster status:
-+Node c7auto1 (1): standby
-+Node c7auto2 (2): standby
-+Node c7auto3 (3): standby
-+Online: [ c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto4
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Stopped
-+
-diff --git a/pengine/test10/clone_min_stop_all.xml b/pengine/test10/clone_min_stop_all.xml
-new file mode 100644
-index 0000000..70e8a96
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_all.xml
-@@ -0,0 +1,158 @@
-+ [~158 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_stop_one.dot b/pengine/test10/clone_min_stop_one.dot
-new file mode 100644
-index 0000000..19f84cc
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_one.dot
-@@ -0,0 +1,18 @@
-+ digraph "g" {
-+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto2" [ style = bold]
-+"FAKECLONE-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_stopped_0" -> "FAKECLONE-clone_start_0" [ style = bold]
-+"FAKECLONE-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE_start_0 " -> "FAKECLONE-clone_running_0" [ style = dashed]
-+"FAKECLONE_start_0 " [ style=dashed color="red" fontcolor="black"]
-+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE_stop_0 c7auto2" -> "all_stopped" [ style = bold]
-+"FAKECLONE_stop_0 c7auto2" [ style=bold color="green" fontcolor="black"]
-+"all_stopped" [ style=bold color="green" fontcolor="orange"]
-+}
-diff --git a/pengine/test10/clone_min_stop_one.exp b/pengine/test10/clone_min_stop_one.exp
-new file mode 100644
-index 0000000..4e6edb8
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_one.exp
-@@ -0,0 +1,74 @@
-+ [~74 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_stop_one.scores b/pengine/test10/clone_min_stop_one.scores
-new file mode 100644
-index 0000000..1f28932
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_one.scores
-@@ -0,0 +1,45 @@
-+Allocation scores:
-+clone_color: FAKECLONE-clone allocation score on c7auto1: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto2: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:0 allocation score on c7auto1: 1
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 1
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 1
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKE allocation score on c7auto1: -INFINITY
-+native_color: FAKE allocation score on c7auto2: -INFINITY
-+native_color: FAKE allocation score on c7auto3: -INFINITY
-+native_color: FAKE allocation score on c7auto4: 0
-+native_color: FAKECLONE:0 allocation score on c7auto1: 1
-+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: 0
-+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto3: 1
-+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: shooter allocation score on c7auto1: 0
-+native_color: shooter allocation score on c7auto2: 0
-+native_color: shooter allocation score on c7auto3: 0
-+native_color: shooter allocation score on c7auto4: 0
-diff --git a/pengine/test10/clone_min_stop_one.summary b/pengine/test10/clone_min_stop_one.summary
-new file mode 100644
-index 0000000..9206a0d
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_one.summary
-@@ -0,0 +1,32 @@
-+
-+Current cluster status:
-+Node c7auto2 (2): standby
-+Online: [ c7auto1 c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto1 c7auto2 c7auto3 ]
-+ Stopped: [ c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Started c7auto4
-+
-+Transition Summary:
-+ * Stop FAKECLONE:1 (c7auto2)
-+
-+Executing cluster transition:
-+ * Pseudo action: FAKECLONE-clone_stop_0
-+ * Resource action: FAKECLONE stop on c7auto2
-+ * Pseudo action: FAKECLONE-clone_stopped_0
-+ * Pseudo action: FAKECLONE-clone_start_0
-+ * Pseudo action: all_stopped
-+ * Pseudo action: FAKECLONE-clone_running_0
-+
-+Revised cluster status:
-+Node c7auto2 (2): standby
-+Online: [ c7auto1 c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto1 c7auto3 ]
-+ Stopped: [ c7auto2 c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Started c7auto4
-+
-diff --git a/pengine/test10/clone_min_stop_one.xml b/pengine/test10/clone_min_stop_one.xml
-new file mode 100644
-index 0000000..eb05803
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_one.xml
-@@ -0,0 +1,152 @@
-+ [~152 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_stop_two.dot b/pengine/test10/clone_min_stop_two.dot
-new file mode 100644
-index 0000000..11640f4
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_two.dot
-@@ -0,0 +1,36 @@
-+ digraph "g" {
-+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto1" [ style = bold]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto2" [ style = bold]
-+"FAKECLONE-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_stopped_0" -> "FAKECLONE-clone_start_0" [ style = bold]
-+"FAKECLONE-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE_start_0 " -> "FAKECLONE-clone_running_0" [ style = dashed]
-+"FAKECLONE_start_0 " [ style=dashed color="red" fontcolor="black"]
-+"FAKECLONE_stop_0 c7auto1" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE_stop_0 c7auto1" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE_stop_0 c7auto1" -> "all_stopped" [ style = bold]
-+"FAKECLONE_stop_0 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE_stop_0 c7auto2" -> "all_stopped" [ style = bold]
-+"FAKECLONE_stop_0 c7auto2" [ style=bold color="green" fontcolor="black"]
-+"FAKE_monitor_10000 c7auto4" [ style=dashed color="red" fontcolor="black"]
-+"FAKE_start_0 c7auto4" -> "FAKE_monitor_10000 c7auto4" [ style = dashed]
-+"FAKE_start_0 c7auto4" [ style=dashed color="red" fontcolor="black"]
-+"FAKE_stop_0 c7auto4" -> "FAKECLONE-clone_stop_0" [ style = bold]
-+"FAKE_stop_0 c7auto4" -> "FAKE_start_0 c7auto4" [ style = dashed]
-+"FAKE_stop_0 c7auto4" -> "all_stopped" [ style = bold]
-+"FAKE_stop_0 c7auto4" [ style=bold color="green" fontcolor="black"]
-+"all_stopped" [ style=bold color="green" fontcolor="orange"]
-+"shooter_monitor_60000 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"shooter_start_0 c7auto3" -> "shooter_monitor_60000 c7auto3" [ style = bold]
-+"shooter_start_0 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"shooter_stop_0 c7auto1" -> "all_stopped" [ style = bold]
-+"shooter_stop_0 c7auto1" -> "shooter_start_0 c7auto3" [ style = bold]
-+"shooter_stop_0 c7auto1" [ style=bold color="green" fontcolor="black"]
-+}
-diff --git a/pengine/test10/clone_min_stop_two.exp b/pengine/test10/clone_min_stop_two.exp
-new file mode 100644
-index 0000000..5697611
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_two.exp
-@@ -0,0 +1,147 @@
-+ [~147 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/clone_min_stop_two.scores b/pengine/test10/clone_min_stop_two.scores
-new file mode 100644
-index 0000000..ce43eb9
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_two.scores
-@@ -0,0 +1,45 @@
-+Allocation scores:
-+clone_color: FAKECLONE-clone allocation score on c7auto1: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto2: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:0 allocation score on c7auto1: 1
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 1
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 1
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKE allocation score on c7auto1: -INFINITY
-+native_color: FAKE allocation score on c7auto2: -INFINITY
-+native_color: FAKE allocation score on c7auto3: -INFINITY
-+native_color: FAKE allocation score on c7auto4: 0
-+native_color: FAKECLONE:0 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto3: 1
-+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: shooter allocation score on c7auto1: 0
-+native_color: shooter allocation score on c7auto2: 0
-+native_color: shooter allocation score on c7auto3: 0
-+native_color: shooter allocation score on c7auto4: 0
-diff --git a/pengine/test10/clone_min_stop_two.summary b/pengine/test10/clone_min_stop_two.summary
-new file mode 100644
-index 0000000..c009d7d
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_two.summary
-@@ -0,0 +1,42 @@
-+
-+Current cluster status:
-+Node c7auto1 (1): standby
-+Node c7auto2 (2): standby
-+Online: [ c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto1 c7auto2 c7auto3 ]
-+ Stopped: [ c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Started c7auto4
-+
-+Transition Summary:
-+ * Move shooter (Started c7auto1 -> c7auto3)
-+ * Stop FAKECLONE:0 (c7auto1)
-+ * Stop FAKECLONE:1 (c7auto2)
-+ * Stop FAKE (Started c7auto4)
-+
-+Executing cluster transition:
-+ * Resource action: shooter stop on c7auto1
-+ * Resource action: FAKE stop on c7auto4
-+ * Resource action: shooter start on c7auto3
-+ * Pseudo action: FAKECLONE-clone_stop_0
-+ * Resource action: shooter monitor=60000 on c7auto3
-+ * Resource action: FAKECLONE stop on c7auto1
-+ * Resource action: FAKECLONE stop on c7auto2
-+ * Pseudo action: FAKECLONE-clone_stopped_0
-+ * Pseudo action: FAKECLONE-clone_start_0
-+ * Pseudo action: all_stopped
-+ * Pseudo action: FAKECLONE-clone_running_0
-+
-+Revised cluster status:
-+Node c7auto1 (1): standby
-+Node c7auto2 (2): standby
-+Online: [ c7auto3 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto3
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto3 ]
-+ Stopped: [ c7auto1 c7auto2 c7auto4 ]
-+ FAKE (ocf::heartbeat:Dummy): Stopped
-+
-diff --git a/pengine/test10/clone_min_stop_two.xml b/pengine/test10/clone_min_stop_two.xml
-new file mode 100644
-index 0000000..8d085ad
---- /dev/null
-+++ b/pengine/test10/clone_min_stop_two.xml
-@@ -0,0 +1,154 @@
-+ [~154 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/cloned_start_one.dot b/pengine/test10/cloned_start_one.dot
-new file mode 100644
-index 0000000..b3c254c
---- /dev/null
-+++ b/pengine/test10/cloned_start_one.dot
-@@ -0,0 +1,32 @@
-+ digraph "g" {
-+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 c7auto1" [ style = bold]
-+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2-clone_running_0" [ style=dashed color="red" fontcolor="orange"]
-+"FAKECLONE2-clone_start_0" -> "FAKECLONE2-clone_running_0" [ style = dashed]
-+"FAKECLONE2-clone_start_0" -> "FAKECLONE2_start_0 c7auto4" [ style = dashed]
-+"FAKECLONE2-clone_start_0" [ style=dashed color="red" fontcolor="orange"]
-+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2-clone_stopped_0" [ style = bold]
-+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2_stop_0 c7auto3" [ style = bold]
-+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2_stop_0 c7auto4" [ style = bold]
-+"FAKECLONE2-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2-clone_stopped_0" -> "FAKECLONE2-clone_start_0" [ style = dashed]
-+"FAKECLONE2-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2_monitor_10000 c7auto4" [ style=dashed color="red" fontcolor="black"]
-+"FAKECLONE2_start_0 c7auto4" -> "FAKECLONE2-clone_running_0" [ style = dashed]
-+"FAKECLONE2_start_0 c7auto4" -> "FAKECLONE2_monitor_10000 c7auto4" [ style = dashed]
-+"FAKECLONE2_start_0 c7auto4" [ style=dashed color="red" fontcolor="black"]
-+"FAKECLONE2_stop_0 c7auto3" -> "FAKECLONE2-clone_stopped_0" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto3" -> "all_stopped" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE2_stop_0 c7auto4" -> "FAKECLONE2-clone_stopped_0" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto4" -> "FAKECLONE2_start_0 c7auto4" [ style = dashed]
-+"FAKECLONE2_stop_0 c7auto4" -> "all_stopped" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto4" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_start_0 c7auto1" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE_start_0 c7auto1" -> "FAKECLONE_monitor_10000 c7auto1" [ style = bold]
-+"FAKECLONE_start_0 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"all_stopped" [ style=bold color="green" fontcolor="orange"]
-+}
-diff --git a/pengine/test10/cloned_start_one.exp b/pengine/test10/cloned_start_one.exp
-new file mode 100644
-index 0000000..636ccd8
---- /dev/null
-+++ b/pengine/test10/cloned_start_one.exp
-@@ -0,0 +1,118 @@
-+ [~118 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/cloned_start_one.scores b/pengine/test10/cloned_start_one.scores
-new file mode 100644
-index 0000000..3dc6ab8
---- /dev/null
-+++ b/pengine/test10/cloned_start_one.scores
-@@ -0,0 +1,77 @@
-+Allocation scores:
-+clone_color: FAKECLONE-clone allocation score on c7auto1: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto2: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE2-clone allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:0 allocation score on c7auto3: 1
-+clone_color: FAKECLONE2:0 allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:1 allocation score on c7auto4: 1
-+clone_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:2 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:2 allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:3 allocation score on c7auto4: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto4: 1
-+native_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto1: 0
-+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: shooter allocation score on c7auto1: 0
-+native_color: shooter allocation score on c7auto2: 0
-+native_color: shooter allocation score on c7auto3: 0
-+native_color: shooter allocation score on c7auto4: 0
-diff --git a/pengine/test10/cloned_start_one.summary b/pengine/test10/cloned_start_one.summary
-new file mode 100644
-index 0000000..20ac58f
---- /dev/null
-+++ b/pengine/test10/cloned_start_one.summary
-@@ -0,0 +1,41 @@
-+
-+Current cluster status:
-+Node c7auto2 (2): standby
-+Node c7auto3 (3): standby
-+Online: [ c7auto1 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
-+ Clone Set: FAKECLONE2-clone [FAKECLONE2]
-+ Started: [ c7auto3 c7auto4 ]
-+ Stopped: [ c7auto1 c7auto2 ]
-+
-+Transition Summary:
-+ * Start FAKECLONE:0 (c7auto1)
-+ * Stop FAKECLONE2:0 (c7auto3)
-+ * Stop FAKECLONE2:1 (Started c7auto4)
-+
-+Executing cluster transition:
-+ * Pseudo action: FAKECLONE-clone_start_0
-+ * Pseudo action: FAKECLONE2-clone_stop_0
-+ * Resource action: FAKECLONE start on c7auto1
-+ * Pseudo action: FAKECLONE-clone_running_0
-+ * Resource action: FAKECLONE2 stop on c7auto3
-+ * Resource action: FAKECLONE2 stop on c7auto4
-+ * Pseudo action: FAKECLONE2-clone_stopped_0
-+ * Pseudo action: all_stopped
-+ * Resource action: FAKECLONE monitor=10000 on c7auto1
-+
-+Revised cluster status:
-+Node c7auto2 (2): standby
-+Node c7auto3 (3): standby
-+Online: [ c7auto1 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto1 ]
-+ Stopped: [ c7auto2 c7auto3 c7auto4 ]
-+ Clone Set: FAKECLONE2-clone [FAKECLONE2]
-+ Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
-+
-diff --git a/pengine/test10/cloned_start_one.xml b/pengine/test10/cloned_start_one.xml
-new file mode 100644
-index 0000000..6c2bfe1
---- /dev/null
-+++ b/pengine/test10/cloned_start_one.xml
-@@ -0,0 +1,154 @@
-+ [~154 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/cloned_start_two.dot b/pengine/test10/cloned_start_two.dot
-new file mode 100644
-index 0000000..348d435
---- /dev/null
-+++ b/pengine/test10/cloned_start_two.dot
-@@ -0,0 +1,26 @@
-+ digraph "g" {
-+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE:1_start_0 c7auto1" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 c7auto2" [ style = bold]
-+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2-clone_stopped_0" [ style = bold]
-+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2_stop_0 c7auto3" [ style = bold]
-+"FAKECLONE2-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2_stop_0 c7auto3" -> "FAKECLONE2-clone_stopped_0" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto3" -> "all_stopped" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE:1_monitor_10000 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE:1_start_0 c7auto1" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE:1_start_0 c7auto1" -> "FAKECLONE:1_monitor_10000 c7auto1" [ style = bold]
-+"FAKECLONE:1_start_0 c7auto1" -> "clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory" [ style = bold]
-+"FAKECLONE:1_start_0 c7auto1" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_monitor_10000 c7auto2" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_start_0 c7auto2" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE_start_0 c7auto2" -> "FAKECLONE_monitor_10000 c7auto2" [ style = bold]
-+"FAKECLONE_start_0 c7auto2" -> "clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory" [ style = bold]
-+"FAKECLONE_start_0 c7auto2" [ style=bold color="green" fontcolor="black"]
-+"all_stopped" [ style=bold color="green" fontcolor="orange"]
-+"clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory" [ style=bold color="green" fontcolor="orange"]
-+}
-diff --git a/pengine/test10/cloned_start_two.exp b/pengine/test10/cloned_start_two.exp
-new file mode 100644
-index 0000000..ee82324
---- /dev/null
-+++ b/pengine/test10/cloned_start_two.exp
-@@ -0,0 +1,143 @@
-+ [~143 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/cloned_start_two.scores b/pengine/test10/cloned_start_two.scores
-new file mode 100644
-index 0000000..dae3b5d
---- /dev/null
-+++ b/pengine/test10/cloned_start_two.scores
-@@ -0,0 +1,77 @@
-+Allocation scores:
-+clone_color: FAKECLONE-clone allocation score on c7auto1: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto2: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE2-clone allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:0 allocation score on c7auto3: 1
-+clone_color: FAKECLONE2:0 allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:1 allocation score on c7auto4: 1
-+clone_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:2 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:2 allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:3 allocation score on c7auto4: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto4: 1
-+native_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto1: 0
-+native_color: FAKECLONE:0 allocation score on c7auto2: 0
-+native_color: FAKECLONE:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: 0
-+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: shooter allocation score on c7auto1: 0
-+native_color: shooter allocation score on c7auto2: 0
-+native_color: shooter allocation score on c7auto3: 0
-+native_color: shooter allocation score on c7auto4: 0
-diff --git a/pengine/test10/cloned_start_two.summary b/pengine/test10/cloned_start_two.summary
-new file mode 100644
-index 0000000..bea4609
---- /dev/null
-+++ b/pengine/test10/cloned_start_two.summary
-@@ -0,0 +1,42 @@
-+
-+Current cluster status:
-+Node c7auto3 (3): standby
-+Online: [ c7auto1 c7auto2 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ]
-+ Clone Set: FAKECLONE2-clone [FAKECLONE2]
-+ Started: [ c7auto3 c7auto4 ]
-+ Stopped: [ c7auto1 c7auto2 ]
-+
-+Transition Summary:
-+ * Start FAKECLONE:0 (c7auto2)
-+ * Start FAKECLONE:1 (c7auto1)
-+ * Stop FAKECLONE2:0 (c7auto3)
-+
-+Executing cluster transition:
-+ * Pseudo action: FAKECLONE-clone_start_0
-+ * Pseudo action: FAKECLONE2-clone_stop_0
-+ * Resource action: FAKECLONE start on c7auto2
-+ * Resource action: FAKECLONE start on c7auto1
-+ * Pseudo action: FAKECLONE-clone_running_0
-+ * Resource action: FAKECLONE2 stop on c7auto3
-+ * Pseudo action: FAKECLONE2-clone_stopped_0
-+ * Pseudo action: all_stopped
-+ * Pseudo action: clone-one-or-more:order-FAKECLONE-clone-FAKECLONE2-clone-mandatory
-+ * Resource action: FAKECLONE monitor=10000 on c7auto2
-+ * Resource action: FAKECLONE monitor=10000 on c7auto1
-+
-+Revised cluster status:
-+Node c7auto3 (3): standby
-+Online: [ c7auto1 c7auto2 c7auto4 ]
-+
-+ shooter (stonith:fence_phd_kvm): Started c7auto1
-+ Clone Set: FAKECLONE-clone [FAKECLONE]
-+ Started: [ c7auto1 c7auto2 ]
-+ Stopped: [ c7auto3 c7auto4 ]
-+ Clone Set: FAKECLONE2-clone [FAKECLONE2]
-+ Started: [ c7auto4 ]
-+ Stopped: [ c7auto1 c7auto2 c7auto3 ]
-+
-diff --git a/pengine/test10/cloned_start_two.xml b/pengine/test10/cloned_start_two.xml
-new file mode 100644
-index 0000000..be78317
---- /dev/null
-+++ b/pengine/test10/cloned_start_two.xml
-@@ -0,0 +1,152 @@
-+ [~152 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/cloned_stop_one.dot b/pengine/test10/cloned_stop_one.dot
-new file mode 100644
-index 0000000..d181135
---- /dev/null
-+++ b/pengine/test10/cloned_stop_one.dot
-@@ -0,0 +1,26 @@
-+ digraph "g" {
-+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold]
-+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto3" [ style = bold]
-+"FAKECLONE-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE-clone_stopped_0" -> "FAKECLONE-clone_start_0" [ style = bold]
-+"FAKECLONE-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2-clone_stopped_0" [ style = bold]
-+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2_stop_0 c7auto3" [ style = bold]
-+"FAKECLONE2-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2-clone_stopped_0" -> "FAKECLONE-clone_stop_0" [ style = bold]
-+"FAKECLONE2-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
-+"FAKECLONE2_stop_0 c7auto3" -> "FAKECLONE2-clone_stopped_0" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto3" -> "all_stopped" [ style = bold]
-+"FAKECLONE2_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"FAKECLONE_start_0 " -> "FAKECLONE-clone_running_0" [ style = dashed]
-+"FAKECLONE_start_0 " [ style=dashed color="red" fontcolor="black"]
-+"FAKECLONE_stop_0 c7auto3" -> "FAKECLONE-clone_stopped_0" [ style = bold]
-+"FAKECLONE_stop_0 c7auto3" -> "FAKECLONE_start_0 " [ style = dashed]
-+"FAKECLONE_stop_0 c7auto3" -> "all_stopped" [ style = bold]
-+"FAKECLONE_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"]
-+"all_stopped" [ style=bold color="green" fontcolor="orange"]
-+}
-diff --git a/pengine/test10/cloned_stop_one.exp b/pengine/test10/cloned_stop_one.exp
-new file mode 100644
-index 0000000..9613d6f
---- /dev/null
-+++ b/pengine/test10/cloned_stop_one.exp
-@@ -0,0 +1,117 @@
-+ [~117 lines of XML stripped during extraction; not recoverable]
-diff --git a/pengine/test10/cloned_stop_one.scores b/pengine/test10/cloned_stop_one.scores
-new file mode 100644
-index 0000000..6d66638
---- /dev/null
-+++ b/pengine/test10/cloned_stop_one.scores
-@@ -0,0 +1,77 @@
-+Allocation scores:
-+clone_color: FAKECLONE-clone allocation score on c7auto1: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto2: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2-clone allocation score on c7auto3: 0
-+clone_color: FAKECLONE2-clone allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:0 allocation score on c7auto3: 1
-+clone_color: FAKECLONE2:0 allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:1 allocation score on c7auto4: 1
-+clone_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:2 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:2 allocation score on c7auto4: 0
-+clone_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY
-+clone_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY
-+clone_color: FAKECLONE2:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE2:3 allocation score on c7auto4: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto1: 1
-+clone_color: FAKECLONE:0 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:1 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto2: 1
-+clone_color: FAKECLONE:1 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:2 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:2 allocation score on c7auto3: 1
-+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY
-+clone_color: FAKECLONE:3 allocation score on c7auto1: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto2: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto3: 0
-+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:1 allocation score on c7auto4: 1
-+native_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:2 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE2:3 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto1: 1
-+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto1: 0
-+native_color: FAKECLONE:1 allocation score on c7auto2: 1
-+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY
-+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY
-+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY
-+native_color: FAKECLONE:2
allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE:2 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY -+native_color: shooter allocation score on c7auto1: 0 -+native_color: shooter allocation score on c7auto2: 0 -+native_color: shooter allocation score on c7auto3: 0 -+native_color: shooter allocation score on c7auto4: 0 -diff --git a/pengine/test10/cloned_stop_one.summary b/pengine/test10/cloned_stop_one.summary -new file mode 100644 -index 0000000..1a952a2 ---- /dev/null -+++ b/pengine/test10/cloned_stop_one.summary -@@ -0,0 +1,40 @@ -+ -+Current cluster status: -+Node c7auto3 (3): standby -+Online: [ c7auto1 c7auto2 c7auto4 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKECLONE-clone [FAKECLONE] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Stopped: [ c7auto4 ] -+ Clone Set: FAKECLONE2-clone [FAKECLONE2] -+ Started: [ c7auto3 c7auto4 ] -+ Stopped: [ c7auto1 c7auto2 ] -+ -+Transition Summary: -+ * Stop FAKECLONE:2 (c7auto3) -+ * Stop FAKECLONE2:0 (c7auto3) -+ -+Executing cluster transition: -+ * Pseudo action: FAKECLONE2-clone_stop_0 -+ * Resource action: FAKECLONE2 stop on c7auto3 -+ * Pseudo action: FAKECLONE2-clone_stopped_0 -+ * Pseudo action: FAKECLONE-clone_stop_0 -+ * Resource action: FAKECLONE stop on c7auto3 -+ * Pseudo action: FAKECLONE-clone_stopped_0 -+ * Pseudo action: FAKECLONE-clone_start_0 -+ * Pseudo action: all_stopped -+ * Pseudo action: FAKECLONE-clone_running_0 -+ -+Revised cluster status: -+Node c7auto3 (3): standby -+Online: [ c7auto1 c7auto2 c7auto4 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKECLONE-clone [FAKECLONE] -+ Started: [ c7auto1 c7auto2 ] -+ Stopped: [ c7auto3 c7auto4 ] -+ Clone Set: FAKECLONE2-clone [FAKECLONE2] -+ Started: [ c7auto4 ] -+ Stopped: [ c7auto1 c7auto2 c7auto3 ] -+ -diff --git a/pengine/test10/cloned_stop_one.xml b/pengine/test10/cloned_stop_one.xml -new file mode 100644 -index 0000000..2e2fdfd ---- /dev/null -+++ b/pengine/test10/cloned_stop_one.xml -@@ -0,0 +1,153 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/cloned_stop_two.dot b/pengine/test10/cloned_stop_two.dot -new file mode 100644 -index 0000000..2c7fd3d ---- /dev/null -+++ b/pengine/test10/cloned_stop_two.dot -@@ -0,0 +1,45 @@ -+ digraph "g" { -+"FAKECLONE-clone_running_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE-clone_start_0" -> "FAKECLONE-clone_running_0" [ style = bold] -+"FAKECLONE-clone_start_0" -> "FAKECLONE_start_0 " [ style = dashed] -+"FAKECLONE-clone_start_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE-clone_stop_0" -> "FAKECLONE-clone_stopped_0" [ style = bold] -+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto2" [ style = bold] -+"FAKECLONE-clone_stop_0" -> "FAKECLONE_stop_0 c7auto3" [ style = bold] 
-+"FAKECLONE-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE-clone_stopped_0" -> "FAKECLONE-clone_start_0" [ style = bold] -+"FAKECLONE-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE2-clone_running_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKECLONE2-clone_start_0" -> "FAKECLONE2-clone_running_0" [ style = dashed] -+"FAKECLONE2-clone_start_0" -> "FAKECLONE2_start_0 c7auto4" [ style = dashed] -+"FAKECLONE2-clone_start_0" [ style=dashed color="red" fontcolor="orange"] -+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2-clone_stopped_0" [ style = bold] -+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2_stop_0 c7auto3" [ style = bold] -+"FAKECLONE2-clone_stop_0" -> "FAKECLONE2_stop_0 c7auto4" [ style = bold] -+"FAKECLONE2-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE2-clone_stopped_0" -> "FAKECLONE-clone_stop_0" [ style = bold] -+"FAKECLONE2-clone_stopped_0" -> "FAKECLONE2-clone_start_0" [ style = dashed] -+"FAKECLONE2-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -+"FAKECLONE2_monitor_10000 c7auto4" [ style=dashed color="red" fontcolor="black"] -+"FAKECLONE2_start_0 c7auto4" -> "FAKECLONE2-clone_running_0" [ style = dashed] -+"FAKECLONE2_start_0 c7auto4" -> "FAKECLONE2_monitor_10000 c7auto4" [ style = dashed] -+"FAKECLONE2_start_0 c7auto4" [ style=dashed color="red" fontcolor="black"] -+"FAKECLONE2_stop_0 c7auto3" -> "FAKECLONE2-clone_stopped_0" [ style = bold] -+"FAKECLONE2_stop_0 c7auto3" -> "all_stopped" [ style = bold] -+"FAKECLONE2_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"FAKECLONE2_stop_0 c7auto4" -> "FAKECLONE2-clone_stopped_0" [ style = bold] -+"FAKECLONE2_stop_0 c7auto4" -> "FAKECLONE2_start_0 c7auto4" [ style = dashed] -+"FAKECLONE2_stop_0 c7auto4" -> "all_stopped" [ style = bold] -+"FAKECLONE2_stop_0 c7auto4" [ style=bold color="green" fontcolor="black"] -+"FAKECLONE_start_0 " -> "FAKECLONE-clone_running_0" [ style = dashed] -+"FAKECLONE_start_0 " [ style=dashed color="red" fontcolor="black"] -+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE-clone_stopped_0" [ style = bold] -+"FAKECLONE_stop_0 c7auto2" -> "FAKECLONE_start_0 " [ style = dashed] -+"FAKECLONE_stop_0 c7auto2" -> "all_stopped" [ style = bold] -+"FAKECLONE_stop_0 c7auto2" [ style=bold color="green" fontcolor="black"] -+"FAKECLONE_stop_0 c7auto3" -> "FAKECLONE-clone_stopped_0" [ style = bold] -+"FAKECLONE_stop_0 c7auto3" -> "FAKECLONE_start_0 " [ style = dashed] -+"FAKECLONE_stop_0 c7auto3" -> "all_stopped" [ style = bold] -+"FAKECLONE_stop_0 c7auto3" [ style=bold color="green" fontcolor="black"] -+"all_stopped" [ style=bold color="green" fontcolor="orange"] -+} -diff --git a/pengine/test10/cloned_stop_two.exp b/pengine/test10/cloned_stop_two.exp -new file mode 100644 -index 0000000..4aa0e58 ---- /dev/null -+++ b/pengine/test10/cloned_stop_two.exp -@@ -0,0 +1,155 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/cloned_stop_two.scores b/pengine/test10/cloned_stop_two.scores -new file mode 100644 -index 0000000..f6e9779 ---- /dev/null -+++ 
b/pengine/test10/cloned_stop_two.scores -@@ -0,0 +1,77 @@ -+Allocation scores: -+clone_color: FAKECLONE-clone allocation score on c7auto1: 0 -+clone_color: FAKECLONE-clone allocation score on c7auto2: 0 -+clone_color: FAKECLONE-clone allocation score on c7auto3: 0 -+clone_color: FAKECLONE-clone allocation score on c7auto4: -INFINITY -+clone_color: FAKECLONE2-clone allocation score on c7auto1: -INFINITY -+clone_color: FAKECLONE2-clone allocation score on c7auto2: -INFINITY -+clone_color: FAKECLONE2-clone allocation score on c7auto3: 0 -+clone_color: FAKECLONE2-clone allocation score on c7auto4: 0 -+clone_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY -+clone_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY -+clone_color: FAKECLONE2:0 allocation score on c7auto3: 1 -+clone_color: FAKECLONE2:0 allocation score on c7auto4: 0 -+clone_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY -+clone_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY -+clone_color: FAKECLONE2:1 allocation score on c7auto3: 0 -+clone_color: FAKECLONE2:1 allocation score on c7auto4: 1 -+clone_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY -+clone_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY -+clone_color: FAKECLONE2:2 allocation score on c7auto3: 0 -+clone_color: FAKECLONE2:2 allocation score on c7auto4: 0 -+clone_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY -+clone_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY -+clone_color: FAKECLONE2:3 allocation score on c7auto3: 0 -+clone_color: FAKECLONE2:3 allocation score on c7auto4: 0 -+clone_color: FAKECLONE:0 allocation score on c7auto1: 1 -+clone_color: FAKECLONE:0 allocation score on c7auto2: 0 -+clone_color: FAKECLONE:0 allocation score on c7auto3: 0 -+clone_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY -+clone_color: FAKECLONE:1 allocation score on c7auto1: 0 -+clone_color: FAKECLONE:1 allocation score on c7auto2: 1 -+clone_color: FAKECLONE:1 allocation score on c7auto3: 0 -+clone_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY -+clone_color: FAKECLONE:2 allocation score on c7auto1: 0 -+clone_color: FAKECLONE:2 allocation score on c7auto2: 0 -+clone_color: FAKECLONE:2 allocation score on c7auto3: 1 -+clone_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY -+clone_color: FAKECLONE:3 allocation score on c7auto1: 0 -+clone_color: FAKECLONE:3 allocation score on c7auto2: 0 -+clone_color: FAKECLONE:3 allocation score on c7auto3: 0 -+clone_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE2:0 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE2:0 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE2:0 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE2:0 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE2:1 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE2:1 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE2:1 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE2:1 allocation score on c7auto4: 1 -+native_color: FAKECLONE2:2 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE2:2 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE2:2 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE2:2 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE2:3 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE2:3 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE2:3 allocation 
score on c7auto3: -INFINITY -+native_color: FAKECLONE2:3 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE:0 allocation score on c7auto1: 1 -+native_color: FAKECLONE:0 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE:0 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE:0 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE:1 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE:1 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE:1 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE:1 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE:2 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE:2 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE:2 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE:2 allocation score on c7auto4: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto1: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto2: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto3: -INFINITY -+native_color: FAKECLONE:3 allocation score on c7auto4: -INFINITY -+native_color: shooter allocation score on c7auto1: 0 -+native_color: shooter allocation score on c7auto2: 0 -+native_color: shooter allocation score on c7auto3: 0 -+native_color: shooter allocation score on c7auto4: 0 -diff --git a/pengine/test10/cloned_stop_two.summary b/pengine/test10/cloned_stop_two.summary -new file mode 100644 -index 0000000..531295f ---- /dev/null -+++ b/pengine/test10/cloned_stop_two.summary -@@ -0,0 +1,45 @@ -+ -+Current cluster status: -+Node c7auto2 (2): standby -+Node c7auto3 (3): standby -+Online: [ c7auto1 c7auto4 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKECLONE-clone [FAKECLONE] -+ Started: [ c7auto1 c7auto2 c7auto3 ] -+ Stopped: [ c7auto4 ] -+ Clone Set: FAKECLONE2-clone [FAKECLONE2] -+ Started: [ c7auto3 c7auto4 ] -+ Stopped: [ c7auto1 c7auto2 ] -+ -+Transition Summary: -+ * Stop FAKECLONE:1 (c7auto2) -+ * Stop FAKECLONE:2 (c7auto3) -+ * Stop FAKECLONE2:0 (c7auto3) -+ * Stop FAKECLONE2:1 (Started c7auto4) -+ -+Executing cluster transition: -+ * Pseudo action: FAKECLONE2-clone_stop_0 -+ * Resource action: FAKECLONE2 stop on c7auto3 -+ * Resource action: FAKECLONE2 stop on c7auto4 -+ * Pseudo action: FAKECLONE2-clone_stopped_0 -+ * Pseudo action: FAKECLONE-clone_stop_0 -+ * Resource action: FAKECLONE stop on c7auto2 -+ * Resource action: FAKECLONE stop on c7auto3 -+ * Pseudo action: FAKECLONE-clone_stopped_0 -+ * Pseudo action: FAKECLONE-clone_start_0 -+ * Pseudo action: all_stopped -+ * Pseudo action: FAKECLONE-clone_running_0 -+ -+Revised cluster status: -+Node c7auto2 (2): standby -+Node c7auto3 (3): standby -+Online: [ c7auto1 c7auto4 ] -+ -+ shooter (stonith:fence_phd_kvm): Started c7auto1 -+ Clone Set: FAKECLONE-clone [FAKECLONE] -+ Started: [ c7auto1 ] -+ Stopped: [ c7auto2 c7auto3 c7auto4 ] -+ Clone Set: FAKECLONE2-clone [FAKECLONE2] -+ Stopped: [ c7auto1 c7auto2 c7auto3 c7auto4 ] -+ -diff --git a/pengine/test10/cloned_stop_two.xml b/pengine/test10/cloned_stop_two.xml -new file mode 100644 -index 0000000..220dfc2 ---- /dev/null -+++ b/pengine/test10/cloned_stop_two.xml -@@ -0,0 +1,157 @@ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ 
-+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -diff --git a/pengine/test10/stonith-1.dot b/pengine/test10/stonith-1.dot -index d64edcf..3814ac9 100644 ---- a/pengine/test10/stonith-1.dot -+++ b/pengine/test10/stonith-1.dot -@@ -88,8 +88,6 @@ digraph "g" { - "rsc_sles-3_stop_0 sles-3" -> "rsc_sles-3_start_0 sles-4" [ style = bold] - "rsc_sles-3_stop_0 sles-3" [ style=bold color="green" fontcolor="orange" ] - "rsc_sles-4_monitor_5000 sles-4" [ style=bold color="green" fontcolor="black" ] --"stonith 'reboot' sles-3" -> "DoFencing_stop_0" [ style = bold] --"stonith 'reboot' sles-3" -> "child_DoFencing:2_stop_0 sles-3" [ style = bold] - "stonith 'reboot' sles-3" -> "master_rsc_1_stop_0" [ style = bold] - "stonith 'reboot' sles-3" -> "migrator_stop_0 sles-3" [ style = bold] - "stonith 'reboot' sles-3" -> "ocf_msdummy:2_stop_0 sles-3" [ style = bold] -diff --git a/pengine/test10/stonith-1.exp b/pengine/test10/stonith-1.exp -index 4d58afa..40b22cb 100644 ---- a/pengine/test10/stonith-1.exp -+++ b/pengine/test10/stonith-1.exp -@@ -210,9 +210,6 @@ - - - -- -- -- -
- - -@@ -236,11 +233,7 @@ - - - -- -- -- -- -- -+ - - - -diff --git a/pengine/test10/stonith-1.summary b/pengine/test10/stonith-1.summary -index ef904fe..e99bb5e 100644 ---- a/pengine/test10/stonith-1.summary -+++ b/pengine/test10/stonith-1.summary -@@ -45,20 +45,22 @@ Executing cluster transition: - * Resource action: lsb_dummy monitor=5000 on sles-2 - * Resource action: rsc_sles-2 monitor=5000 on sles-2 - * Resource action: rsc_sles-4 monitor=5000 on sles-4 -+ * Pseudo action: DoFencing_stop_0 - * Fencing sles-3 (reboot) - * Pseudo action: stonith_complete - * Resource action: r192.168.100.183 start on sles-1 - * Pseudo action: migrator_stop_0 - * Pseudo action: rsc_sles-3_stop_0 -- * Pseudo action: DoFencing_stop_0 -+ * Pseudo action: child_DoFencing:2_stop_0 -+ * Pseudo action: DoFencing_stopped_0 -+ * Pseudo action: DoFencing_start_0 - * Pseudo action: master_rsc_1_stop_0 - * Pseudo action: group-1_running_0 - * Resource action: r192.168.100.183 monitor=5000 on sles-1 - * Resource action: migrator start on sles-4 - * Resource action: rsc_sles-3 start on sles-4 -- * Pseudo action: child_DoFencing:2_stop_0 -- * Pseudo action: DoFencing_stopped_0 -- * Pseudo action: DoFencing_start_0 -+ * Resource action: child_DoFencing:2 start on sles-4 -+ * Pseudo action: DoFencing_running_0 - * Pseudo action: ocf_msdummy:2_stop_0 - * Pseudo action: ocf_msdummy:5_stop_0 - * Pseudo action: master_rsc_1_stopped_0 -@@ -66,8 +68,7 @@ Executing cluster transition: - * Pseudo action: all_stopped - * Resource action: migrator monitor=10000 on sles-4 - * Resource action: rsc_sles-3 monitor=5000 on sles-4 -- * Resource action: child_DoFencing:2 start on sles-4 -- * Pseudo action: DoFencing_running_0 -+ * Resource action: child_DoFencing:2 monitor=60000 on sles-4 - * Resource action: ocf_msdummy:0 start on sles-4 - * Resource action: ocf_msdummy:1 start on sles-1 - * Resource action: ocf_msdummy:2 start on sles-2 -@@ -75,7 +76,6 @@ Executing cluster transition: - * Resource action: ocf_msdummy:4 start on sles-1 - * Resource action: ocf_msdummy:5 start on sles-2 - * Pseudo action: master_rsc_1_running_0 -- * Resource action: child_DoFencing:2 monitor=60000 on sles-4 - * Resource action: ocf_msdummy:0 monitor=5000 on sles-4 - * Resource action: ocf_msdummy:1 monitor=5000 on sles-1 - * Resource action: ocf_msdummy:2 monitor=5000 on sles-2 -diff --git a/pengine/test10/ticket-master-21.dot b/pengine/test10/ticket-master-21.dot -index 60386a8..3f94948 100644 ---- a/pengine/test10/ticket-master-21.dot -+++ b/pengine/test10/ticket-master-21.dot -@@ -23,7 +23,6 @@ digraph "g" { - "stonith 'reboot' node1" -> "ms1_stop_0" [ style = bold] - "stonith 'reboot' node1" -> "rsc1:1_demote_0 node1" [ style = bold] - "stonith 'reboot' node1" -> "rsc1:1_stop_0 node1" [ style = bold] --"stonith 'reboot' node1" -> "rsc_stonith_stop_0 node1" [ style = bold] - "stonith 'reboot' node1" -> "stonith_complete" [ style = bold] - "stonith 'reboot' node1" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] -diff --git a/pengine/test10/ticket-master-21.exp b/pengine/test10/ticket-master-21.exp -index cc8df2f..c32bac5 100644 ---- a/pengine/test10/ticket-master-21.exp -+++ b/pengine/test10/ticket-master-21.exp -@@ -18,11 +18,7 @@ - - - -- -- -- -- -- -+ - - - -diff --git a/pengine/test10/ticket-master-21.summary b/pengine/test10/ticket-master-21.summary -index 64a9cbe..b228696 100644 ---- a/pengine/test10/ticket-master-21.summary -+++ b/pengine/test10/ticket-master-21.summary -@@ -12,14 +12,14 @@ 
Transition Summary: - * Demote rsc1:0 (Master -> Stopped node1) - - Executing cluster transition: -+ * Pseudo action: rsc_stonith_stop_0 - * Pseudo action: ms1_demote_0 - * Fencing node1 (reboot) - * Pseudo action: stonith_complete -- * Pseudo action: rsc_stonith_stop_0 -+ * Resource action: rsc_stonith start on node2 - * Pseudo action: rsc1:1_demote_0 - * Pseudo action: ms1_demoted_0 - * Pseudo action: ms1_stop_0 -- * Resource action: rsc_stonith start on node2 - * Pseudo action: rsc1:1_stop_0 - * Pseudo action: ms1_stopped_0 - * Pseudo action: all_stopped -diff --git a/pengine/test10/ticket-master-9.dot b/pengine/test10/ticket-master-9.dot -index 3a29836..c648feb 100644 ---- a/pengine/test10/ticket-master-9.dot -+++ b/pengine/test10/ticket-master-9.dot -@@ -23,7 +23,6 @@ digraph "g" { - "stonith 'reboot' node1" -> "ms1_stop_0" [ style = bold] - "stonith 'reboot' node1" -> "rsc1:1_demote_0 node1" [ style = bold] - "stonith 'reboot' node1" -> "rsc1:1_stop_0 node1" [ style = bold] --"stonith 'reboot' node1" -> "rsc_stonith_stop_0 node1" [ style = bold] - "stonith 'reboot' node1" -> "stonith_complete" [ style = bold] - "stonith 'reboot' node1" [ style=bold color="green" fontcolor="black"] - "stonith_complete" -> "all_stopped" [ style = bold] -diff --git a/pengine/test10/ticket-master-9.exp b/pengine/test10/ticket-master-9.exp -index cc8df2f..c32bac5 100644 ---- a/pengine/test10/ticket-master-9.exp -+++ b/pengine/test10/ticket-master-9.exp -@@ -18,11 +18,7 @@ - - - -- -- -- -- -- -+ - - - -diff --git a/pengine/test10/ticket-master-9.summary b/pengine/test10/ticket-master-9.summary -index 64a9cbe..b228696 100644 ---- a/pengine/test10/ticket-master-9.summary -+++ b/pengine/test10/ticket-master-9.summary -@@ -12,14 +12,14 @@ Transition Summary: - * Demote rsc1:0 (Master -> Stopped node1) - - Executing cluster transition: -+ * Pseudo action: rsc_stonith_stop_0 - * Pseudo action: ms1_demote_0 - * Fencing node1 (reboot) - * Pseudo action: stonith_complete -- * Pseudo action: rsc_stonith_stop_0 -+ * Resource action: rsc_stonith start on node2 - * Pseudo action: rsc1:1_demote_0 - * Pseudo action: ms1_demoted_0 - * Pseudo action: ms1_stop_0 -- * Resource action: rsc_stonith start on node2 - * Pseudo action: rsc1:1_stop_0 - * Pseudo action: ms1_stopped_0 - * Pseudo action: all_stopped -diff --git a/pengine/test10/whitebox-imply-stop-on-fence.dot b/pengine/test10/whitebox-imply-stop-on-fence.dot -index 66700b8..b3fd40b 100644 ---- a/pengine/test10/whitebox-imply-stop-on-fence.dot -+++ b/pengine/test10/whitebox-imply-stop-on-fence.dot -@@ -69,7 +69,6 @@ - "stonith 'reboot' kiff-01" -> "clvmd_stop_0 kiff-01" [ style = bold] - "stonith 'reboot' kiff-01" -> "dlm-clone_stop_0" [ style = bold] - "stonith 'reboot' kiff-01" -> "dlm_stop_0 kiff-01" [ style = bold] --"stonith 'reboot' kiff-01" -> "fence-kiff-02_stop_0 kiff-01" [ style = bold] - "stonith 'reboot' kiff-01" -> "lxc-01_kiff-01_stop_0 kiff-01" [ style = bold] - "stonith 'reboot' kiff-01" -> "lxc-02_kiff-01_stop_0 kiff-01" [ style = bold] - "stonith 'reboot' kiff-01" -> "shared0-clone_stop_0" [ style = bold] -diff --git a/pengine/test10/whitebox-imply-stop-on-fence.exp b/pengine/test10/whitebox-imply-stop-on-fence.exp -index d13c25f..4a3e757 100644 ---- a/pengine/test10/whitebox-imply-stop-on-fence.exp -+++ b/pengine/test10/whitebox-imply-stop-on-fence.exp -@@ -31,11 +31,7 @@ - - - -- -- -- -- -- -+ - - - -diff --git a/pengine/test10/whitebox-imply-stop-on-fence.summary b/pengine/test10/whitebox-imply-stop-on-fence.summary -index 3bb1572..3ee9570 
100644 ---- a/pengine/test10/whitebox-imply-stop-on-fence.summary -+++ b/pengine/test10/whitebox-imply-stop-on-fence.summary -@@ -36,16 +36,16 @@ Transition Summary: - * Move lxc-02_kiff-01 (Started kiff-01 -> kiff-02) - - Executing cluster transition: -+ * Pseudo action: fence-kiff-02_stop_0 - * Fencing kiff-01 (reboot) - * Pseudo action: stonith_complete -- * Pseudo action: fence-kiff-02_stop_0 -+ * Resource action: fence-kiff-02 start on kiff-02 - * Pseudo action: vm-fs_stop_0 - * Pseudo action: lxc-01_kiff-01_stop_0 - * Pseudo action: lxc-02_kiff-01_stop_0 -- * Resource action: fence-kiff-02 start on kiff-02 -+ * Resource action: fence-kiff-02 monitor=60000 on kiff-02 - * Pseudo action: R-lxc-01_kiff-01_stop_0 - * Pseudo action: R-lxc-02_kiff-01_stop_0 -- * Resource action: fence-kiff-02 monitor=60000 on kiff-02 - * Pseudo action: shared0-clone_stop_0 - * Resource action: R-lxc-01_kiff-01 start on kiff-02 - * Resource action: R-lxc-02_kiff-01 start on kiff-02 diff --git a/SPECS/pacemaker.spec b/SPECS/pacemaker.spec index 72df431..b802f50 100644 --- a/SPECS/pacemaker.spec +++ b/SPECS/pacemaker.spec @@ -2,9 +2,18 @@ %global uname hacluster %global pcmk_docdir %{_docdir}/%{name} -%global specversion 10 -%global commit 44eb2ddf8d4f8fc05256aae2abc9fbf3ae4d1fbc -%global shortcommit %(c=%{commit}; echo ${c:0:7}) +%global specversion 11 +%global pcmkversion 1.1.15 +# set following to the actual commit or, for final release, concatenate +# "pcmkversion" macro to "Pacemaker-" (will yield a tag per the convention) +%global commit e174ec84857e087210b9dacee3318f8203176129 +%global lparen ( +%global rparen ) +%global shortcommit %(c=%{commit}; case ${c} in + Pacemaker-*%{rparen} echo ${c:10};; + *%{rparen} echo ${c:0:7};; esac) +%global pre_release %(s=%{shortcommit}; [ ${s: -4:3} != -rc ]; echo $?) +%global post_release %([ %{commit} = Pacemaker-%{shortcommit} ]; echo $?) %global github_owner ClusterLabs %global nagios_hash 105ab8a @@ -15,7 +24,20 @@ %global rawhide %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) %global cs_version %(pkg-config corosync --modversion | awk -F . 
'{print $1}') -%global py_site %(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))") +%global py_site %{?python_sitearch}%{!?python_sitearch:%(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")} +%global cman_native 0%{?el6} || (0%{?fedora} > 0 && 0%{?fedora} < 17) + +# It's desired to apply "license" macro uniformly in "files" sections below, +# but RPM versions not aware of this new classification normally (re)define it +# to the value of "License:", so following is to ensure the macro definition +# is per expectation only after that tag; solution courtesy of Jason Tibbitts: +# https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77b0c05a6c29bc0eb0c4e82113180a0a99#n12 +%if !%{defined _licensedir} +%define description %{lua: + rpm.define("license %doc") + print("%description") +} +%endif # Conditionals # Invoke "rpmbuild --without " or "rpmbuild --with " @@ -36,102 +58,62 @@ # Use a different versioning scheme %bcond_with pre_release -# Ship an Upstart job file -%bcond_with upstart_job - -# Turn off cman support on platforms that normally ship with it +# Provide rpm build option to enable CMAN support (off by default in RHEL 7) %bcond_with cman +# Turn off hardening of libraries and daemon executables +%bcond_without hardening + %if %{with profiling} # This disables -debuginfo package creation and also the stripping binaries/libraries # Useful if you want sane profiling data %global debug_package %{nil} %endif -%if %{with pre_release} +%if %{with pre_release} || 0%{pre_release} +%if 0%{pre_release} +%global pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3}) +%else %global pcmk_release 0.%{specversion}.%{shortcommit}.git +%endif %else %global pcmk_release %{specversion} %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager -Version: 1.1.13 -Release: %{pcmk_release}%{?dist}.4 +Version: %{pcmkversion} +Release: %{pcmk_release}%{?dist} +%if %{defined _unitdir} License: GPLv2+ and LGPLv2+ +%else +# initscript is Revised BSD +License: GPLv2+ and LGPLv2+ and BSD +%endif Url: http://www.clusterlabs.org Group: System Environment/Daemons -# eg. https://github.com/ClusterLabs/pacemaker/archive/8ae45302394b039fb098e150f156df29fc0cb576/pacemaker-8ae45302394b039fb098e150f156df29fc0cb576.tar.gz -Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{commit}.tar.gz +# eg. 
https://github.com/ClusterLabs/pacemaker/archive/8ae45302394b039fb098e150f156df29fc0cb576/pacemaker-8ae4530.tar.gz +Source0: https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz Source1: nagios-agents-metadata-%{nagios_hash}.tar.gz -Patch1: pacemaker-63f8e9a-rollup.patch -Patch2: pacemaker-rollup-7-1-3d781d3.patch -Patch3: pacemaker-rollup-3a7715d.patch -Patch4: 0004-Fix-crm_resource-Correctly-check-if-a-resource-is-un.patch -Patch5: 0005-Fix-PE-Bug-cl-5247-Imply-resources-running-on-a-cont.patch -Patch6: 0006-Fix-Date-Correctly-set-time-from-seconds-since-epoch.patch -Patch7: 0007-Test-PE-Bug-cl-5247-Imply-resources-running-on-a-con.patch -Patch8: 0008-Fix-tools-memory-leak-in-crm_resource.patch -Patch9: 0009-Fix-pengine-The-failed-action-of-the-resource-that-o.patch -Patch10: 0010-Log-services-Reduce-severity-of-noisy-log-messages.patch -Patch11: 0011-Fix-xml-Mark-xml-nodes-as-dirty-if-any-children-move.patch -Patch12: 0012-Feature-crmd-Implement-reliable-event-notifications.patch -Patch13: 0013-Fix-cman-Suppress-implied-node-names.patch -Patch14: 0014-Fix-crmd-Choose-more-appropriate-names-for-notificat.patch -Patch15: 0015-Fix-crmd-Correctly-enable-disable-notifications.patch -Patch16: 0016-Fix-crmd-Report-the-completion-status-and-output-of-.patch -Patch17: 0017-Fix-cman-Print-the-nodeid-of-nodes-with-fake-names.patch -Patch18: 0018-Refactor-Tools-Isolate-the-paths-which-truely-requir.patch -Patch19: 0019-Fix-corosync-Display-node-state-and-quorum-data-if-a.patch -Patch20: 0020-Fix-pacemakerd-Do-not-forget-about-nodes-that-leave-.patch -Patch21: 0021-Fix-pacemakerd-Track-node-state-in-pacemakerd.patch -Patch22: 0022-Fix-PE-Resolve-memory-leak.patch -Patch23: 0023-Fix-cman-Purge-all-node-caches-for-crm_node-R.patch -Patch24: 0024-Refactor-membership-Safely-autoreap-nodes-without-co.patch -Patch25: 0025-Fix-crmd-Prevent-segfault-by-correctly-detecting-whe.patch -Patch26: 0026-Fix-crmd-don-t-add-node-ID-to-proxied-remote-node-re.patch -Patch27: 0027-Fix-pacemaker_remote-memory-leak-in-ipc_proxy_dispat.patch -Patch28: 0028-Log-The-package-version-is-more-informative.patch -Patch29: 0029-Fix-crm_resource-Allow-the-resource-configuration-to.patch -Patch30: 0030-Log-lrmd-Improved-logging-when-no-pacemaker-remote-a.patch -Patch31: 0031-Fix-liblrmd-don-t-print-error-if-remote-key-environm.patch -Patch32: 0032-Fix-Tools-Repair-the-logging-of-interesting-command-.patch -Patch33: 0033-Feature-Tools-Do-not-send-command-lines-to-syslog.patch -Patch34: 0034-Log-cibadmin-Default-once-again-to-LOG_CRIT.patch -Patch35: 0035-Fix-crm_resource-Correctly-update-existing-meta-attr.patch -Patch36: 0036-Log-crm_resource-restart-Improved-user-feedback-on-f.patch -Patch37: 0037-Fix-crm_resource-Correctly-delete-existing-meta-attr.patch -Patch38: 0038-Fix-crm_resource-Correctly-observe-force-when-deleti.patch -Patch39: 0039-prevent-segfault-when-logging.patch -Patch40: 0040-update-top-format-in-HealthCPU.patch -Patch41: 0041-delete-fence-attributes-correctly.patch -Patch42: 0042-handle-systemd-shutdown.patch -Patch43: 0043-cts-fix-for-command-lines.patch - -# graceful pacemaker_remote stops -Patch100: 0100-Refactor-lrmd-handle-shutdown-a-little-more-cleanly.patch -Patch101: 0101-Refactor-lrmd-make-proxied-IPC-providers-clients-opa.patch -Patch102: 0102-Refactor-crmd-lrmd-liblrmd-use-defined-constants-for.patch -Patch103: 0103-Test-cts-simulate-pacemaker_remote-failure-with-kill.patch -Patch104: 0104-Feature-lrmd-liblrmd-add-lrmd-IPC-operations-for-req.patch -Patch105: 
0105-Feature-crmd-support-graceful-pacemaker_remote-stops.patch -Patch106: 0106-Feature-pacemaker_remote-support-graceful-stops.patch -Patch107: 0107-Feature-PE-Honor-the-shutdown-transient-attributes-f.patch -Patch108: 0108-Feature-crmd-Set-the-shutdown-transient-attribute-in.patch -Patch109: 0109-Fix-attrd-Hook-up-the-client-name-so-we-can-track-re.patch -Patch111: 0111-Log-crmd-Graceful-proxy-shutdown-is-now-tested.patch -Patch112: 0112-Fix-crmd-set-remote-flag.patch -Patch113: 0113-Fix-attrd-correct-peer-cache.patch -Patch114: 0114-clear-remote-node-transient.patch - -# later patches -Patch115: 0115-crm_resource-restart-fixes.patch -Patch116: 0116-unrunnable-clones.patch +Patch1: 001-makefile-cleanup.patch +Patch2: 002-build-cleanup.patch +Patch3: 003-harden-toolchain.patch +Patch4: 004-bz1290592.patch +Patch5: 005-avoid-null-dereference.patch +Patch6: 006-alert-snmp-quoting.patch +Patch7: 007-cib-callback-unregistration.patch +Patch8: 008-crm_mon-headings.patch +Patch9: 009-crm_mon-schema.patch +Patch10: 010-memory-checks.patch +Patch11: 011-resend-shutdown.patch +Patch12: 012-invalid-config-loop.patch +Patch13: 013-clear-remote-history.patch +Patch14: 014-crm_report.patch BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) AutoReqProv: on -Requires: python >= 2.4 +Requires: python >= 2.6 Requires: resource-agents Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cluster-libs = %{version}-%{release} @@ -148,10 +130,10 @@ Provides: pcmk-cluster-manager ExclusiveArch: i686 x86_64 s390x %endif -# Required for core functionality -BuildRequires: automake autoconf libtool pkgconfig python libtool-ltdl-devel -BuildRequires: glib2-devel libxml2-devel libxslt-devel libuuid-devel -BuildRequires: pkgconfig python-devel gcc-c++ bzip2-devel pam-devel +# Required for core functionality (python-devel depends on python) +BuildRequires: automake autoconf libtool pkgconfig libtool-ltdl-devel +BuildRequires: pkgconfig(glib-2.0) libxml2-devel libxslt-devel libuuid-devel +BuildRequires: python-devel bzip2-devel pam-devel # Required for agent_config.h which specifies the correct scratch directory BuildRequires: resource-agents @@ -160,28 +142,22 @@ BuildRequires: resource-agents BuildRequires: libqb-devel > 0.17.0 Requires: libqb > 0.17.0 +# RH patches are created by git, so we need git to apply them +BuildRequires: git + # Enables optional functionality BuildRequires: ncurses-devel openssl-devel libselinux-devel docbook-style-xsl -BuildRequires: bison byacc flex help2man gnutls-devel dbus-devel +BuildRequires: bison byacc flex help2man gnutls-devel pkgconfig(dbus-1) %if %{defined _unitdir} BuildRequires: systemd-devel %endif -%if %{with cman} - -%if 0%{?fedora} > 0 -%if 0%{?fedora} < 17 -BuildRequires: clusterlib-devel -%endif -%endif - -%if 0%{?rhel} > 0 -%if 0%{?rhel} < 7 +%if %{with cman} && %{cman_native} BuildRequires: clusterlib-devel -%endif -%endif - +# pacemaker initscript: cman initscript, fence_tool (+ some soft-dependencies) +# "post" scriptlet: ccs_update_schema +Requires: cman %endif Requires: corosync @@ -214,14 +190,14 @@ when related resources fail and can be configured to periodically check resource health. 
Available rpmbuild rebuild options: - --with(out) : cman stonithd doc coverage profiling pre_release upstart_job + --with(out) : cman stonithd doc coverage profiling pre_release hardening %package cli -License: GPLv2+ and LGPLv2+ -Summary: Command line tools for controlling Pacemaker clusters -Group: System Environment/Daemons -Requires: %{name}-libs = %{version}-%{release} -Requires: perl-TimeDate +License: GPLv2+ and LGPLv2+ +Summary: Command line tools for controlling Pacemaker clusters +Group: System Environment/Daemons +Requires: %{name}-libs = %{version}-%{release} +Requires: perl-TimeDate %description cli Pacemaker is an advanced, scalable High-Availability cluster resource @@ -232,9 +208,9 @@ to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{name}-libs -License: GPLv2+ and LGPLv2+ -Summary: Core Pacemaker libraries -Group: System Environment/Daemons +License: GPLv2+ and LGPLv2+ +Summary: Core Pacemaker libraries +Group: System Environment/Daemons %description -n %{name}-libs Pacemaker is an advanced, scalable High-Availability cluster resource @@ -244,10 +220,10 @@ The %{name}-libs package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package -n %{name}-cluster-libs -License: GPLv2+ and LGPLv2+ -Summary: Cluster Libraries used by Pacemaker -Group: System Environment/Daemons -Requires: %{name}-libs = %{version}-%{release} +License: GPLv2+ and LGPLv2+ +Summary: Cluster Libraries used by Pacemaker +Group: System Environment/Daemons +Requires: %{name}-libs = %{version}-%{release} %description -n %{name}-cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource @@ -257,13 +233,18 @@ The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. %package remote -License: GPLv2+ and LGPLv2+ -Summary: Pacemaker remote daemon for non-cluster nodes -Group: System Environment/Daemons -Requires: %{name}-libs = %{version}-%{release} -Requires: %{name}-cli = %{version}-%{release} -Requires: resource-agents -Provides: pcmk-cluster-manager +%if %{defined _unitdir} +License: GPLv2+ and LGPLv2+ +%else +# initscript is Revised BSD +License: GPLv2+ and LGPLv2+ and BSD +%endif +Summary: Pacemaker remote daemon for non-cluster nodes +Group: System Environment/Daemons +Requires: %{name}-libs = %{version}-%{release} +Requires: %{name}-cli = %{version}-%{release} +Requires: resource-agents +Provides: pcmk-cluster-manager %if %{defined systemd_requires} %systemd_requires %endif @@ -277,15 +258,15 @@ which is capable of extending pacemaker functionality to remote nodes not running the full corosync/cluster stack. 
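The %shortcommit, %pre_release, and %pcmk_release macros near the top of this spec encode the "release tag vs. bare commit hash" convention described in the comment there. A rough shell equivalent, runnable outside rpmbuild to sanity-check that logic (the Pacemaker-1.1.15-rc4 input is illustrative only, not a commit actually referenced by this update):

    # mirrors: case ${c} in Pacemaker-*) echo ${c:10};; *) echo ${c:0:7};; esac
    shortcommit() { c=$1; case ${c} in Pacemaker-*) echo "${c:10}";; *) echo "${c:0:7}";; esac; }
    shortcommit e174ec84857e087210b9dacee3318f8203176129   # -> e174ec8 (bare hash: first 7 chars)
    shortcommit Pacemaker-1.1.15-rc4                       # -> 1.1.15-rc4 (tag: strip "Pacemaker-")
    # mirrors the pre_release test: [ ${s: -4:3} != -rc ]; echo $?
    s=$(shortcommit Pacemaker-1.1.15-rc4)
    [ "${s: -4:3}" != -rc ]; echo $?                       # -> 1: a release candidate, so
                                                           #    pcmk_release becomes 0.11.rc4
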
%package -n %{name}-libs-devel -License: GPLv2+ and LGPLv2+ -Summary: Pacemaker development package -Group: Development/Libraries -Requires: %{name}-cts = %{version}-%{release} -Requires: %{name}-libs = %{version}-%{release} -Requires: %{name}-cluster-libs = %{version}-%{release} -Requires: libtool-ltdl-devel libqb-devel libuuid-devel -Requires: libxml2-devel libxslt-devel bzip2-devel glib2-devel -Requires: corosynclib-devel +License: GPLv2+ and LGPLv2+ +Summary: Pacemaker development package +Group: Development/Libraries +Requires: %{name}-cts = %{version}-%{release} +Requires: %{name}-libs = %{version}-%{release} +Requires: %{name}-cluster-libs = %{version}-%{release} +Requires: libtool-ltdl-devel libqb-devel libuuid-devel +Requires: libxml2-devel libxslt-devel bzip2-devel glib2-devel +Requires: corosynclib-devel %description -n %{name}-libs-devel Pacemaker is an advanced, scalable High-Availability cluster resource @@ -294,31 +275,44 @@ manager for Corosync, CMAN and/or Linux-HA. The %{name}-libs-devel package contains headers and shared libraries for developing tools for Pacemaker. -%package cts -License: GPLv2+ and LGPLv2+ -Summary: Test framework for cluster-related technologies like Pacemaker -Group: System Environment/Daemons -Requires: python -Requires: %{name}-libs = %{version}-%{release} +%package cts +License: GPLv2+ and LGPLv2+ +Summary: Test framework for cluster-related technologies like Pacemaker +Group: System Environment/Daemons +Requires: python +Requires: %{name}-libs = %{version}-%{release} + +# systemd python bindings are separate package in some distros +%if %{defined systemd_requires} + +%if 0%{?fedora} > 20 +Requires: systemd-python +%endif + +%if 0%{?rhel} > 6 +Requires: systemd-python +%endif + +%endif -%description cts +%description cts Test framework for cluster-related technologies like Pacemaker -%package doc -License: GPLv2+ and LGPLv2+ -Summary: Documentation for Pacemaker -Group: Documentation +%package doc +License: GPLv2+ and LGPLv2+ +Summary: Documentation for Pacemaker +Group: Documentation -%description doc +%description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager for Corosync, CMAN and/or Linux-HA. -%package nagios-plugins-metadata -License: GPLv2+ and LGPLv2+ -Summary: Pacemaker Nagios Metadata -Group: System Environment/Daemons +%package nagios-plugins-metadata +License: GPLv2+ and LGPLv2+ +Summary: Pacemaker Nagios Metadata +Group: System Environment/Daemons # NOTE below are the plugins this metadata uses. # These plugin packages are currently not requirements # for the nagios metadata because rhel does not ship these @@ -327,47 +321,62 @@ Group: System Environment/Daemons # rpm packages, or source. If rhel ships the nagios plugins # in the future, we should consider enabling the following # required fields. -#Requires: nagios-plugins-http -#Requires: nagios-plugins-ldap -#Requires: nagios-plugins-mysql -#Requires: nagios-plugins-pgsql -#Requires: nagios-plugins-tcp -Requires: pcmk-cluster-manager - -%description nagios-plugins-metadata +#Requires: nagios-plugins-http +#Requires: nagios-plugins-ldap +#Requires: nagios-plugins-mysql +#Requires: nagios-plugins-pgsql +#Requires: nagios-plugins-tcp +Requires: pcmk-cluster-manager + +%description nagios-plugins-metadata The metadata files required for Pacemaker to execute the nagios plugin monitor resources. 
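The conditional features advertised in the base package's %description ("--with(out) : cman stonithd doc coverage profiling pre_release hardening") map onto the %bcond_with/%bcond_without declarations above and are flipped from the rpmbuild command line. A minimal sketch of a local rebuild; the SRPM file name is illustrative:

    # rebuild with CMAN support enabled and toolchain hardening disabled
    rpmbuild --rebuild --with cman --without hardening pacemaker-1.1.15-11.el7.src.rpm
    # equivalent macro form, per the hardening comment in %build below
    rpmbuild --rebuild --define '_with_cman 1' --define '_without_hardening 1' \
        pacemaker-1.1.15-11.el7.src.rpm
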
%prep -%setup -q -a 0 -n %{name}-%{commit} -%setup -q -a 1 -n %{name}-%{commit} -%autopatch -p1 +%autosetup -a 1 -n %{name}-%{commit} -S git_am -p 1 # Force the local time # -# Git sets the file date to the date of the last commit. +# 'git' sets the file date to the date of the last commit. # This can result in files having been created in the future # when building on machines in timezones 'behind' the one the # commit occurred in - which seriously confuses 'make' find . -exec touch \{\} \; %build -./autogen.sh -# For some reason, 'Apache Configuration' is no longer accepted by publican/Kate.pm on RHEL7 -sed -i 's/Apache Configuration/Bash/' doc/Clusters_from_Scratch/en-US/Ch-Apache.txt +export CPPFLAGS="-DRHEL7_COMPAT" + +# Early versions of autotools (e.g. RHEL <= 5) do not support --docdir +export docdir=%{pcmk_docdir} + +%if %{with hardening} +# prefer distro-provided hardening flags in case they are defined +# through _hardening_{c,ld}flags macros, configure script will +# use its own defaults otherwise; if such hardenings are completely +# undesired, rpmbuild using "--without hardening" +# (or "--define '_without_hardening 1'") +export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}" +export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}" +export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}" +export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}" +%endif + +./autogen.sh -# RHEL <= 5 does not support --docdir -docdir=%{pcmk_docdir} %{configure} \ +%{configure} \ %{?with_profiling: --with-profiling} \ %{?with_coverage: --with-coverage} \ %{!?with_cman: --without-cman} \ + --without-heartbeat \ + %{!?with_doc: --with-brand=} \ + %{!?with_hardening: --disable-hardening} \ --with-initdir=%{_initrddir} \ --localstatedir=%{_var} \ - --with-version=%{version}-%{release} \ --with-nagios \ --with-nagios-metadata-dir=%{_datadir}/pacemaker/nagios/plugins-metadata/ \ --with-nagios-plugin-dir=%{_libdir}/nagios/plugins/ \ + --with-version=%{version}-%{release} %if 0%{?suse_version} >= 1200 # Fedora handles rpath removal automagically @@ -375,7 +384,7 @@ sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool %endif -make %{_smp_mflags} V=1 docdir=%{pcmk_docdir} all +make %{_smp_mflags} V=1 all %check # Prevent false positives in rpmlint @@ -386,25 +395,16 @@ rm -rf %{buildroot} make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig -mkdir -p ${RPM_BUILD_ROOT}%{_var}/lib/pacemaker/cores install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker +install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon mkdir -p %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata for file in $(find nagios-agents-metadata-%{nagios_hash}/metadata -type f); do - # fping isn't shipped in rhel - if [ "$file" = "check_fping.xml" ]; then - continue - # udp plugin is not being shipped in rhel - elif [ "$file" = "check_udp.xml" ]; then - continue - fi - install -m 644 $file %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata + install -m 644 $file %{buildroot}%{_datadir}/pacemaker/nagios/plugins-metadata done -%if %{with upstart_job} -mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init -install -m 644 mcp/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf -install -m 644 mcp/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf +%if %{defined _unitdir} +mkdir -p 
${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name} %endif # Scripts that should be executable @@ -412,11 +412,8 @@ chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py # These are not actually scripts find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x -find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x -find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x -find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x -# Dont package static libs +# Don't package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f @@ -424,7 +421,7 @@ find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f rm -f %{buildroot}/%{_libdir}/service_crm.so rm -f %{buildroot}/%{_sbindir}/fence_legacy rm -f %{buildroot}/%{_mandir}/man8/fence_legacy.* -find %{buildroot} -name 'o2cb*' -type f -print0 | xargs -0 rm -f +find %{buildroot} -name '*o2cb*' -type f -print0 | xargs -0 rm -f # Don't ship init scripts for systemd based platforms %if %{defined _unitdir} @@ -432,6 +429,11 @@ rm -f %{buildroot}/%{_initrddir}/pacemaker rm -f %{buildroot}/%{_initrddir}/pacemaker_remote %endif +# Don't ship fence_pcmk where it has no use +%if %{without cman} +rm -f %{buildroot}/%{_sbindir}/fence_pcmk +%endif + %if %{with coverage} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE @@ -445,6 +447,8 @@ done %clean rm -rf %{buildroot} +%if %{defined _unitdir} + %post %systemd_post pacemaker.service @@ -452,7 +456,17 @@ rm -rf %{buildroot} %systemd_preun pacemaker.service %postun -%systemd_postun_with_restart pacemaker.service +%systemd_postun_with_restart pacemaker.service + +%pre remote +systemctl --quiet is-active pacemaker_remote +if [ $? 
-eq 0 ] ; then + mkdir -p %{_localstatedir}/lib/rpm-state/%{name} + touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote + systemctl stop pacemaker_remote >/dev/null 2>&1 +else + rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote +fi %post remote %systemd_post pacemaker_remote.service @@ -461,7 +475,55 @@ rm -rf %{buildroot} %systemd_preun pacemaker_remote.service %postun remote -%systemd_postun_with_restart pacemaker_remote.service +%systemd_postun_with_restart pacemaker_remote.service +if [ $1 -eq 0 ] ; then + rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote +fi + +%posttrans remote +if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then + systemctl start pacemaker_remote >/dev/null 2>&1 + rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote +fi + +%post cli +%systemd_post crm_mon.service + +%preun cli +%systemd_preun crm_mon.service + +%postun cli +%systemd_postun_with_restart crm_mon.service + +%else + +%post +/sbin/chkconfig --add pacemaker || : +%if %{with cman} && %{cman_native} +# make fence_pcmk in cluster.conf valid instantly otherwise tools like ccs may +# choke (until schema gets auto-regenerated on the next start of cluster), +# per the protocol shared with other packages contributing to cluster.rng +/usr/sbin/ccs_update_schema >/dev/null 2>&1 || : +%endif + +%preun +/sbin/service pacemaker stop || : +if [ $1 -eq 0 ]; then + # Package removal, not upgrade + /sbin/chkconfig --del pacemaker || : +fi + +%post remote +/sbin/chkconfig --add pacemaker_remote || : + +%preun remote +/sbin/service pacemaker_remote stop &>/dev/null || : +if [ $1 -eq 0 ]; then + # Package removal, not upgrade + /sbin/chkconfig --del pacemaker_remote || : +fi + +%endif %pre -n %{name}-libs @@ -481,9 +543,6 @@ exit 0 ########################################################### %defattr(-,root,root) -%exclude %{_datadir}/pacemaker/tests - -%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd @@ -493,12 +552,6 @@ exit 0 %{_initrddir}/pacemaker %endif -%exclude %{_datadir}/pacemaker/report.common -%exclude %{_datadir}/pacemaker/report.collector -%exclude %{_datadir}/pacemaker/nagios/plugins-metadata/* -%{_datadir}/pacemaker -%{_datadir}/snmp/mibs/PCMK-MIB.txt - %exclude %{_libexecdir}/pacemaker/lrmd_test %exclude %{_sbindir}/pacemaker_remoted %{_libexecdir}/pacemaker/* @@ -506,50 +559,58 @@ exit 0 %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/crm_node -%{_sbindir}/attrd_updater +%if %{with cman} %{_sbindir}/fence_pcmk +%endif %{_sbindir}/stonith_admin -%if %{with cman} -%{_bindir}/ccs2cib -%{_bindir}/ccs_flatten -%{_bindir}/disable_rgmanager +%doc %{_mandir}/man7/crmd.* +%doc %{_mandir}/man7/pengine.* +%doc %{_mandir}/man7/stonithd.* +%if %{without cman} || !%{cman_native} +%doc %{_mandir}/man7/ocf_pacemaker_controld.* %endif - -%doc %{_mandir}/man7/* -%doc %{_mandir}/man8/attrd_updater.* +%doc %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/crm_attribute.* %doc %{_mandir}/man8/crm_node.* %doc %{_mandir}/man8/crm_master.* +%if %{with cman} %doc %{_mandir}/man8/fence_pcmk.* +%endif %doc %{_mandir}/man8/pacemakerd.* %doc %{_mandir}/man8/stonith_admin.* -%doc COPYING +%doc %{_datadir}/pacemaker/alerts + +%license COPYING %doc AUTHORS %doc ChangeLog -%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib -%dir %attr (750, %{uname}, 
%{gname}) %{_var}/lib/pacemaker/cores %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine -%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox -%dir /usr/lib/ocf -%dir /usr/lib/ocf/resource.d +%if %{without cman} || !%{cman_native} +/usr/lib/ocf/resource.d/pacemaker/controld +%endif +/usr/lib/ocf/resource.d/pacemaker/remote /usr/lib/ocf/resource.d/.isolation -/usr/lib/ocf/resource.d/pacemaker +%if "%{?cs_version}" != "UNKNOWN" %if 0%{?cs_version} < 2 %{_libexecdir}/lcrso/pacemaker.lcrso %endif - -%if %{with upstart_job} -%config(noreplace) %{_sysconfdir}/init/pacemaker.conf -%config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf %endif %files cli %defattr(-,root,root) + +%config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker +%config(noreplace) %{_sysconfdir}/sysconfig/crm_mon + +%if %{defined _unitdir} +%{_unitdir}/crm_mon.service +%endif + +%{_sbindir}/attrd_updater %{_sbindir}/cibadmin %{_sbindir}/crm_diff %{_sbindir}/crm_error @@ -564,11 +625,26 @@ exit 0 %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket -%{_datadir}/pacemaker/report.common -%{_datadir}/pacemaker/report.collector +%exclude %{_datadir}/pacemaker/alerts +%exclude %{_datadir}/pacemaker/tests +%exclude %{_datadir}/pacemaker/nagios +%{_datadir}/pacemaker +%{_datadir}/snmp/mibs/PCMK-MIB.txt + +%exclude /usr/lib/ocf/resource.d/pacemaker/controld +%exclude /usr/lib/ocf/resource.d/pacemaker/remote + +%dir /usr/lib/ocf +%dir /usr/lib/ocf/resource.d +/usr/lib/ocf/resource.d/pacemaker +%doc %{_mandir}/man7/* +%exclude %{_mandir}/man7/crmd.* +%exclude %{_mandir}/man7/pengine.* +%exclude %{_mandir}/man7/stonithd.* +%exclude %{_mandir}/man7/ocf_pacemaker_controld.* +%exclude %{_mandir}/man7/ocf_pacemaker_remote.* %doc %{_mandir}/man8/* -%exclude %{_mandir}/man8/attrd_updater.* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_node.* %exclude %{_mandir}/man8/crm_master.* @@ -577,14 +653,14 @@ exit 0 %exclude %{_mandir}/man8/pacemaker_remoted.* %exclude %{_mandir}/man8/stonith_admin.* -%if %{defined _unitdir} -%{_unitdir}/crm_mon.service -%endif - -%doc COPYING +%license COPYING %doc AUTHORS %doc ChangeLog +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox +%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores + %files -n %{name}-libs %defattr(-,root,root) @@ -597,13 +673,13 @@ exit 0 %{_libdir}/libpengine.so.* %{_libdir}/libstonithd.so.* %{_libdir}/libtransitioner.so.* -%doc COPYING.LIB +%license COPYING.LIB %doc AUTHORS %files -n %{name}-cluster-libs %defattr(-,root,root) %{_libdir}/libcrmcluster.so.* -%doc COPYING.LIB +%license COPYING.LIB %doc AUTHORS %files remote @@ -611,6 +687,7 @@ exit 0 %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %if %{defined _unitdir} +%ghost %{_localstatedir}/lib/rpm-state/%{name} %{_unitdir}/pacemaker_remote.service %else %{_initrddir}/pacemaker_remote @@ -618,7 +695,7 @@ exit 0 %{_sbindir}/pacemaker_remoted %{_mandir}/man8/pacemaker_remoted.* -%doc COPYING.LIB +%license COPYING %doc AUTHORS %files doc @@ -630,7 +707,7 @@ exit 0 %{py_site}/cts %{_datadir}/pacemaker/tests/cts %{_libexecdir}/pacemaker/lrmd_test -%doc COPYING.LIB +%license COPYING %doc AUTHORS %files -n %{name}-libs-devel @@ -643,7 +720,7 @@ exit 0 %{_var}/lib/pacemaker/gcov %endif %{_libdir}/pkgconfig/*.pc -%doc COPYING.LIB +%license COPYING.LIB %doc AUTHORS %files nagios-plugins-metadata @@ -652,32 +729,103 @@ exit 0 %attr(0644,root,root) 
 
 %changelog
-* Fri Jun 17 2016 Ken Gaillot - 1.1.13-10.4
-- Fix multiple issues with crm_resource --restart
-- Properly handle descendents of unrunnable clones
-- Resolves: rhbz#1347806
-- Resolves: rhbz#1349493
-
-* Fri Jun 10 2016 Ken Gaillot - 1.1.13-10.3
-- Properly clear remote node transient attributes on disconnect
-- Resolves: rhbz#1344223
-
-* Tue Jan 26 2016 Ken Gaillot - 1.1.13-10.2
-- Properly cache remote nodes when adding node attributes
-- Resolves: rhbz#1299348
-
-* Mon Jan 18 2016 Ken Gaillot - 1.1.13-10.1
-- Prevent lrmd crash when logging certain systemd operation failures
-- Handle systemd shutdown properly
-- Don't delete fence device when deleting an attribute
-- Handle new top output format in HealthCPU resource
-- Implement graceful stopping of pacemaker_remote
-- Update CTS to match applied code patches
-- Resolves: rhbz#1299339
-- Resolves: rhbz#1299340
-- Resolves: rhbz#1299341
-- Resolves: rhbz#1299342
-- Resolves: rhbz#1299348
+* Thu Sep 22 2016 Ken Gaillot - 1.1.15-11
+- Sanitize readable CIB output collected by crm_report
+- Document crm_report --sos-mode option
+- Speed up crm_report on Pacemaker Remote nodes
+- Avoid sbd fencing when upgrading pacemaker_remote package
+- Resolves: rhbz#1219188
+- Resolves: rhbz#1235434
+- Resolves: rhbz#1323544
+- Resolves: rhbz#1372009
+
+* Mon Aug 15 2016 Ken Gaillot - 1.1.15-10
+- Only clear remote node operation history on startup
+- Resend a lost shutdown request
+- Correctly detect and report invalid configurations
+- Don't include manual page for resource agent that isn't included
+- Resolves: rhbz#1288929
+- Resolves: rhbz#1310486
+- Resolves: rhbz#1352039
+
+* Fri Aug 5 2016 Ken Gaillot - 1.1.15-9
+- Make crm_mon XML schema handle multiple-active resources
+- Resolves: rhbz#1364500
+
+* Wed Aug 3 2016 Ken Gaillot - 1.1.15-8
+- Quote timestamp-format correctly in alert_snmp.sh.sample
+- Unregister CIB callbacks correctly
+- Print resources section heading consistently in crm_mon output
+- Resolves: rhbz#773656
+- Resolves: rhbz#1361533
+
+* Tue Jul 26 2016 Ken Gaillot - 1.1.15-7
+- Avoid null dereference
+- Resolves: rhbz#1290592
+
+* Tue Jul 26 2016 Ken Gaillot - 1.1.15-6
+- Fix transition failure with start-then-stop order constraint + unfencing
+- Resolves: rhbz#1290592
+
+* Fri Jul 1 2016 Ken Gaillot - 1.1.15-5
+- Update spec file for toolchain hardening
+- Resolves: rhbz#1242258
+
+* Tue Jun 28 2016 Ken Gaillot - 1.1.15-4
+- Take advantage of toolchain hardening
+- Resolves: rhbz#1242258
+
+* Wed Jun 22 2016 Ken Gaillot - 1.1.15-3
+- Rebase to upstream e174ec84857e087210b9dacee3318f8203176129 (1.1.15)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#1303765
+  Resolves: rhbz#1327469
+  Resolves: rhbz#1337688
+  Resolves: rhbz#1345876
+  Resolves: rhbz#1346726
+
+* Fri Jun 10 2016 Ken Gaillot - 1.1.15-2
+- Rebase to upstream 25920dbdbc7594fc944a963036996f724c63a8b8 (1.1.15-rc4)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#773656
+  Resolves: rhbz#1240330
+  Resolves: rhbz#1281450
+  Resolves: rhbz#1286316
+  Resolves: rhbz#1287315
+  Resolves: rhbz#1323544
+
+* Tue May 31 2016 Ken Gaillot - 1.1.15-1
+- Rebase to upstream 2c148ac30dfcc2cfb91dc367ed469b6f227a8abc (1.1.15-rc3+)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#1040685
+  Resolves: rhbz#1219188
+  Resolves: rhbz#1235434
+  Resolves: rhbz#1268313
+  Resolves: rhbz#1284069
+  Resolves: rhbz#1287868
+  Resolves: rhbz#1288929
+  Resolves: rhbz#1312094
+  Resolves: rhbz#1314157
+  Resolves: rhbz#1321711
+  Resolves: rhbz#1338623
+
+* Thu Feb 18 2016 Ken Gaillot - 1.1.14-11
+- Rebase to upstream 2cccd43d6b7f2525d406251e14ef37626e29c51f (1.1.14+)
+- Resolves: rhbz#1304771
+  Resolves: rhbz#1207388
+  Resolves: rhbz#1240330
+  Resolves: rhbz#1281450
+  Resolves: rhbz#1284069
+  Resolves: rhbz#1286316
+  Resolves: rhbz#1287315
+  Resolves: rhbz#1287868
+  Resolves: rhbz#1288929
+  Resolves: rhbz#1303765
+- This also updates the packaging to follow upstream more closely,
+  most importantly moving some files from the pacemaker package to
+  pacemaker-cli (including XML schemas, SNMP MIB, attrd_updater command,
+  most ocf:pacemaker resource agents, and related man pages),
+  and deploying /etc/sysconfig/crm_mon.
 
 * Thu Oct 08 2015 Andrew Beekhof - 1.1.13-10
 - More improvements when updating and deleting meta attributes