From a07ff469d96312b37f9c0c5ac65e7c1f87394ce5 Mon Sep 17 00:00:00 2001
From: Andrew Beekhof
Date: Fri, 12 Oct 2018 12:13:14 +1100
Subject: [PATCH 1/9] Fix: schedulerd: Improve internal bundle ordering

If the remote resource is scheduled to stop, we should at least wait to know
the state of the underlying container resource before executing it.
Otherwise we may end up needlessly tearing it down just because someone ran a
cleanup on the container.
---
 pengine/container.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pengine/container.c b/pengine/container.c
index 15d094d..4e77545 100644
--- a/pengine/container.c
+++ b/pengine/container.c
@@ -278,6 +278,7 @@ container_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
         order_start_start(rsc, tuple->docker, pe_order_runnable_left | pe_order_implies_first_printed);
 
         if(tuple->child) {
+            new_rsc_order(tuple->docker, RSC_STATUS, tuple->remote, RSC_STOP, pe_order_optional, data_set);
             order_stop_stop(rsc, tuple->child, pe_order_implies_first_printed);
         }
         order_stop_stop(rsc, tuple->docker, pe_order_implies_first_printed);
-- 
1.8.3.1


From 35dc265c4577b7fbe46c8e5919b7ef59028c1de5 Mon Sep 17 00:00:00 2001
From: Ken Gaillot
Date: Mon, 29 Oct 2018 14:23:06 -0500
Subject: [PATCH 2/9] Test: pengine: Improve internal bundle ordering

---
 pengine/test10/bundle-nested-colocation.exp     | 12 +++---
 pengine/test10/bundle-order-fencing.exp         | 56 ++++++++++++-------------
 pengine/test10/bundle-order-partial-start-2.exp | 12 +++---
 pengine/test10/bundle-order-partial-start.exp   | 12 +++---
 pengine/test10/bundle-order-partial-stop.exp    | 16 +++----
 pengine/test10/bundle-order-stop-clone.exp      | 20 ++++-----
 pengine/test10/bundle-order-stop-on-remote.exp  | 44 +++++++++----------
 pengine/test10/bundle-order-stop.exp            | 16 +++----
 8 files changed, 94 insertions(+), 94 deletions(-)

diff --git a/pengine/test10/bundle-nested-colocation.exp b/pengine/test10/bundle-nested-colocation.exp
index a50809c..29c2eda 100644
    [XML hunk content lost in extraction; only hunk markers survived]

diff --git a/pengine/test10/bundle-order-fencing.exp b/pengine/test10/bundle-order-fencing.exp
index 599c299..2b8f5cf 100644
    [XML hunk content lost in extraction; only hunk markers survived]

diff --git a/pengine/test10/bundle-order-partial-start-2.exp b/pengine/test10/bundle-order-partial-start-2.exp
index bf9a0b0..168ec7a 100644
    [XML hunk content lost in extraction; only hunk markers survived]

diff --git a/pengine/test10/bundle-order-partial-start.exp b/pengine/test10/bundle-order-partial-start.exp
index 8e28f19..5e80822 100644
    [XML hunk content lost in extraction; only hunk markers survived]

diff --git a/pengine/test10/bundle-order-partial-stop.exp b/pengine/test10/bundle-order-partial-stop.exp
index 89d87aa..626fc44 100644
    [XML hunk content lost in extraction; only hunk markers survived]

diff --git a/pengine/test10/bundle-order-stop-clone.exp b/pengine/test10/bundle-order-stop-clone.exp
index 3e66f54..6ef5dac 100644
    [XML hunk content lost in extraction; only hunk markers survived]

diff --git a/pengine/test10/bundle-order-stop-on-remote.exp b/pengine/test10/bundle-order-stop-on-remote.exp
index 96588dc..6559dfd 100644
    [XML hunk content lost in extraction; only hunk markers survived]

diff --git a/pengine/test10/bundle-order-stop.exp b/pengine/test10/bundle-order-stop.exp
index 89d87aa..626fc44 100644
    [XML hunk content lost in extraction; only hunk markers survived]
-- 
1.8.3.1


From 16fda11606c3cbc432153a8677ab7c378f0cbd2e Mon Sep 17 00:00:00 2001
From: Ken Gaillot
Date: Tue, 9 Oct 2018 11:57:43 -0500
Subject: [PATCH 3/9] Log: scheduler: improve bundle address fixing messages

Mostly, try to make clear when the bundle container's node is used vs. the
bundle connection's node.
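For illustration, a minimal caller-side sketch of that distinction, adapted
from the container_expand() hunk in this patch (tuple and nvpair are the
scheduler's existing bundle-tuple and XML attribute variables, shown here out
of their surrounding function):

    /* The connection resource's "addr" parameter is derived from whichever
     * node ends up hosting the container, not from the connection's own node.
     */
    const char *calculated_addr =
        container_fix_remote_addr_in(tuple->remote, nvpair, "value");

    if (calculated_addr) {
        crm_trace("Setting address for bundle connection %s to bundle host %s",
                  tuple->remote->id, calculated_addr);
        g_hash_table_replace(tuple->remote->parameters,
                             strdup("addr"), strdup(calculated_addr));
    } else {
        crm_err("Could not determine address for bundle connection %s",
                tuple->remote->id);
    }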
--- lib/pengine/container.c | 5 +++-- lib/pengine/utils.c | 3 ++- pengine/container.c | 6 ++++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/pengine/container.c b/lib/pengine/container.c index 1526f37..483d219 100644 --- a/lib/pengine/container.c +++ b/lib/pengine/container.c @@ -814,11 +814,12 @@ container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field) } if(node == NULL) { - crm_trace("Cannot fix address for %s", tuple->remote->id); + crm_trace("Cannot determine address for bundle connection %s", rsc->id); return NULL; } - crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname); + crm_trace("Setting address for bundle connection %s to bundle host %s", + rsc->id, node->details->uname); if(xml != NULL && field != NULL) { crm_xml_add(xml, field, node->details->uname); } diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index a9ca86f..3f8dc30 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -1971,7 +1971,8 @@ rsc_action_digest(resource_t * rsc, const char *task, const char *key, // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside if (container_fix_remote_addr_in(rsc, data->params_all, "addr")) { - crm_trace("Fixed addr for %s on %s", rsc->id, node->details->uname); + crm_trace("Set address for bundle connection %s (on %s)", + rsc->id, node->details->uname); } g_hash_table_foreach(local_rsc_params, hash2field, data->params_all); diff --git a/pengine/container.c b/pengine/container.c index 4e77545..02ee425 100644 --- a/pengine/container.c +++ b/pengine/container.c @@ -846,10 +846,12 @@ container_expand(resource_t * rsc, pe_working_set_t * data_set) const char *calculated_addr = container_fix_remote_addr_in(tuple->remote, nvpair, "value"); if (calculated_addr) { - crm_trace("Fixed addr for %s on %s", tuple->remote->id, calculated_addr); + crm_trace("Set address for bundle connection %s to bundle host %s", + tuple->remote->id, calculated_addr); g_hash_table_replace(tuple->remote->parameters, strdup("addr"), strdup(calculated_addr)); } else { - crm_err("Could not fix addr for %s", tuple->remote->id); + crm_err("Could not determine address for bundle connection %s", + tuple->remote->id); } } if(tuple->ip) { -- 1.8.3.1 From 0907d0e3061f05d0fdd3a276e8a5c578813d5ea9 Mon Sep 17 00:00:00 2001 From: Ken Gaillot Date: Tue, 9 Oct 2018 16:09:33 -0500 Subject: [PATCH 4/9] Refactor: scheduler: remove redundant code unpack_rsc_op() doesn't need to map status PCMK_LRM_OP_ERROR to PCMK_LRM_OP_DONE because it explicitly looks for both when it uses status check_operation_expiry() doesn't need to check that failure_timeout is positive when expired is true, because expired can be true only if failure_timeout is positive check_action_definition() doesn't need to check whether task is stop because it cannot be called for stops check_actions_for() doesn't need to check whether a status operation is a probe because it also checks for recurring operations in the same place --- lib/pengine/unpack.c | 32 +++++++++++++++----------------- pengine/allocate.c | 14 ++++---------- 2 files changed, 19 insertions(+), 27 deletions(-) diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index 52518d4..7bc78a3 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -2990,21 +2990,24 @@ static bool check_operation_expiry(resource_t *rsc, node_t *node, int rc, xmlNod } if (expired) { - if (failure_timeout > 0) { - if (pe_get_failcount(node, rsc, &last_failure, pe_fc_default, - xml_op, data_set)) { + if 
(pe_get_failcount(node, rsc, &last_failure, pe_fc_default, xml_op, + data_set)) { - if (pe_get_failcount(node, rsc, &last_failure, pe_fc_effective, - xml_op, data_set) == 0) { - clear_reason = "it expired"; - } else { - expired = FALSE; - } + // There is a fail count ignoring timeout - } else if (rsc->remote_reconnect_interval && strstr(ID(xml_op), "last_failure")) { - /* always clear last failure when reconnect interval is set */ - clear_reason = "reconnect interval is set"; + if (pe_get_failcount(node, rsc, &last_failure, pe_fc_effective, + xml_op, data_set) == 0) { + // There is no fail count considering timeout + clear_reason = "it expired"; + + } else { + expired = FALSE; } + + } else if (rsc->remote_reconnect_interval + && strstr(ID(xml_op), "last_failure")) { + // Always clear last failure when reconnect interval is set + clear_reason = "reconnect interval is set"; } } else if (strstr(ID(xml_op), "last_failure") && @@ -3240,11 +3243,6 @@ unpack_rsc_op(resource_t * rsc, node_t * node, xmlNode * xml_op, xmlNode ** last node->details->uname, rsc->id); } - if (status == PCMK_LRM_OP_ERROR) { - /* Older versions set this if rc != 0 but it's up to us to decide */ - status = PCMK_LRM_OP_DONE; - } - if(status != PCMK_LRM_OP_NOT_INSTALLED) { expired = check_operation_expiry(rsc, node, rc, xml_op, data_set); } diff --git a/pengine/allocate.c b/pengine/allocate.c index dc8017a..5589a2f 100644 --- a/pengine/allocate.c +++ b/pengine/allocate.c @@ -262,9 +262,6 @@ check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op const char *digest_secure = NULL; CRM_CHECK(active_node != NULL, return FALSE); - if (safe_str_eq(task, RSC_STOP)) { - return FALSE; - } interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); @@ -395,7 +392,6 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki xmlNode *rsc_op = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; - gboolean is_probe = FALSE; gboolean did_change = FALSE; CRM_CHECK(node != NULL, return); @@ -449,22 +445,20 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki continue; } - is_probe = FALSE; did_change = FALSE; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL); interval = crm_parse_int(interval_s, "0"); - if (interval == 0 && safe_str_eq(task, RSC_STATUS)) { - is_probe = TRUE; - } - if (interval > 0 && (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set); - } else if (is_probe || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || interval > 0 + } else if ((interval > 0) + || safe_str_eq(task, RSC_STATUS) + || safe_str_eq(task, RSC_START) + || safe_str_eq(task, RSC_PROMOTE) || safe_str_eq(task, RSC_MIGRATED)) { did_change = check_action_definition(rsc, node, rsc_op, data_set); } -- 1.8.3.1 From 83b6e8d1453dc6656384bbde3bfb86c5970d54c2 Mon Sep 17 00:00:00 2001 From: Ken Gaillot Date: Tue, 9 Oct 2018 15:04:24 -0500 Subject: [PATCH 5/9] Refactor: scheduler: functionize scheduling clearing of fail count Reduces duplication, allows reuse, and improves consistency (for example, some uses previously set XML_ATTR_TE_NOWAIT and some didn't). 
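As a rough usage sketch (the helper and its signature come from the diff
below; rsc, node, and data_set stand in for whatever a caller already has in
scope, and the reason string is the one used by cleanup_orphans()):

    /* Schedule a controller op to clear rsc's fail count on node, with a
     * human-readable reason for the logs. The helper sets XML_ATTR_TE_NOWAIT
     * itself, so callers no longer need to do so individually.
     */
    pe_action_t *clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                                data_set);

    if (clear_op != NULL) {
        /* callers may order other actions relative to clear_op if needed */
    }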
--- include/crm/pengine/internal.h | 3 +++ lib/pengine/failcounts.c | 30 +++++++++++++++++++++++++++- lib/pengine/unpack.c | 10 ++-------- pengine/allocate.c | 44 +++++++++++++----------------------------- 4 files changed, 47 insertions(+), 40 deletions(-) diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h index 4aca751..6745ae3 100644 --- a/include/crm/pengine/internal.h +++ b/include/crm/pengine/internal.h @@ -114,6 +114,9 @@ int pe_get_failcount(node_t *node, resource_t *rsc, time_t *last_failure, uint32_t flags, xmlNode *xml_op, pe_working_set_t *data_set); +pe_action_t *pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node, + const char *reason, + pe_working_set_t *data_set); /* Functions for finding/counting a resource's active nodes */ diff --git a/lib/pengine/failcounts.c b/lib/pengine/failcounts.c index e217176..8a7d0e4 100644 --- a/lib/pengine/failcounts.c +++ b/lib/pengine/failcounts.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008-2017 Andrew Beekhof + * Copyright 2008-2018 Andrew Beekhof * * This source code is licensed under the GNU Lesser General Public License * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. @@ -319,3 +319,31 @@ pe_get_failcount(node_t *node, resource_t *rsc, time_t *last_failure, return failcount; } + +/*! + * \brief Schedule a controller operation to clear a fail count + * + * \param[in] rsc Resource with failure + * \param[in] node Node failure occurred on + * \param[in] reason Readable description why needed (for logging) + * \param[in] data_set Working set for cluster + * + * \return Scheduled action + */ +pe_action_t * +pe__clear_failcount(pe_resource_t *rsc, pe_node_t *node, + const char *reason, pe_working_set_t *data_set) +{ + char *key = NULL; + action_t *clear = NULL; + + CRM_CHECK(rsc && node && reason && data_set, return NULL); + + key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); + clear = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, + data_set); + add_hash_param(clear->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); + crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s", + rsc->id, node->details->uname, reason, clear->uuid); + return clear; +} diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index 7bc78a3..8f9099c 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -3031,14 +3031,8 @@ static bool check_operation_expiry(resource_t *rsc, node_t *node, int rc, xmlNod if (clear_reason != NULL) { node_t *remote_node = pe_find_node(data_set->nodes, rsc->id); - char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); - action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, - node, FALSE, TRUE, data_set); - - add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); - - crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s", - rsc->id, node->details->uname, clear_reason, clear_op->uuid); + pe_action_t *clear_op = pe__clear_failcount(rsc, node, clear_reason, + data_set); if (is_set(data_set->flags, pe_flag_stonith_enabled) && rsc->remote_reconnect_interval diff --git a/pengine/allocate.c b/pengine/allocate.c index 5589a2f..569a4a5 100644 --- a/pengine/allocate.c +++ b/pengine/allocate.c @@ -376,7 +376,6 @@ check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op return did_change; } - static void check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set) { @@ -392,7 +391,6 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, 
pe_worki xmlNode *rsc_op = NULL; GListPtr op_list = NULL; GListPtr sorted_op_list = NULL; - gboolean did_change = FALSE; CRM_CHECK(node != NULL, return); @@ -445,7 +443,6 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki continue; } - did_change = FALSE; task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK); interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL); @@ -453,6 +450,7 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki if (interval > 0 && (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) { + // Maintenance mode cancels recurring operations CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set); } else if ((interval > 0) @@ -460,28 +458,18 @@ check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_worki || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || safe_str_eq(task, RSC_MIGRATED)) { - did_change = check_action_definition(rsc, node, rsc_op, data_set); - } - - if (did_change && pe_get_failcount(node, rsc, NULL, pe_fc_effective, - NULL, data_set)) { - - char *key = NULL; - action_t *action_clear = NULL; - - key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); - action_clear = - custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set); - set_bit(action_clear->flags, pe_action_runnable); - - crm_notice("Clearing failure of %s on %s " - "because action definition changed " CRM_XS " %s", - rsc->id, node->details->uname, action_clear->uuid); + /* If a resource operation failed, and the operation's definition + * has changed, clear any fail count so they can be retried fresh. + */ + if (check_action_definition(rsc, node, rsc_op, data_set) + && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, + data_set)) { + pe__clear_failcount(rsc, node, "action definition changed", + data_set); + } } } - g_list_free(sorted_op_list); - } static GListPtr @@ -1254,16 +1242,10 @@ cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set) && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL, data_set)) { - char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0); - action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, - node, FALSE, TRUE, data_set); - - add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE); + pe_action_t *clear_op = NULL; - pe_rsc_info(rsc, - "Clearing failure of %s on %s because it is orphaned " - CRM_XS " %s", - rsc->id, node->details->uname, clear_op->uuid); + clear_op = pe__clear_failcount(rsc, node, "it is orphaned", + data_set); /* We can't use order_action_then_stop() here because its * pe_order_preserve breaks things -- 1.8.3.1 From fade22882d52ac743b99adc4cbd98780b21e250b Mon Sep 17 00:00:00 2001 From: Ken Gaillot Date: Mon, 15 Oct 2018 10:26:46 -0500 Subject: [PATCH 6/9] Fix: scheduler: avoid unnecessary recovery of cleaned guest nodes Generalize Andrew Beekhof's fix for bundle nodes in 407f524 to all guest nodes. 
Fixes RHBZ#1448467 --- pengine/container.c | 1 - pengine/native.c | 39 +++++++++++++++++++++++++-------------- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/pengine/container.c b/pengine/container.c index 02ee425..a35763b 100644 --- a/pengine/container.c +++ b/pengine/container.c @@ -278,7 +278,6 @@ container_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) order_start_start(rsc, tuple->docker, pe_order_runnable_left | pe_order_implies_first_printed); if(tuple->child) { - new_rsc_order(tuple->docker, RSC_STATUS, tuple->remote, RSC_STOP, pe_order_optional, data_set); order_stop_stop(rsc, tuple->child, pe_order_implies_first_printed); } order_stop_stop(rsc, tuple->docker, pe_order_implies_first_printed); diff --git a/pengine/native.c b/pengine/native.c index 6447234..cd746b6 100644 --- a/pengine/native.c +++ b/pengine/native.c @@ -1486,6 +1486,26 @@ native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) if (rsc->container) { resource_t *remote_rsc = NULL; + if (rsc->is_remote_node) { + // rsc is the implicit remote connection for a guest or bundle node + + /* Do not allow a guest resource to live on a Pacemaker Remote node, + * to avoid nesting remotes. However, allow bundles to run on remote + * nodes. + */ + if (is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) { + rsc_avoids_remote_nodes(rsc->container); + } + + /* If someone cleans up a guest or bundle node's container, we will + * likely schedule a (re-)probe of the container and recovery of the + * connection. Order the connection stop after the container probe, + * so that if we detect the container running, we will trigger a new + * transition and avoid the unnecessary recovery. + */ + new_rsc_order(rsc->container, RSC_STATUS, rsc, RSC_STOP, + pe_order_optional, data_set); + /* A user can specify that a resource must start on a Pacemaker Remote * node by explicitly configuring it with the container=NODENAME * meta-attribute. This is of questionable merit, since location @@ -1493,16 +1513,15 @@ native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) * we check whether a resource (that is not itself a remote connection) * has container set to a remote node or guest node resource. */ - if (rsc->container->is_remote_node) { + } else if (rsc->container->is_remote_node) { remote_rsc = rsc->container; - } else if (rsc->is_remote_node == FALSE) { + } else { remote_rsc = rsc_contains_remote_node(data_set, rsc->container); } if (remote_rsc) { - /* The container represents a Pacemaker Remote node, so force the - * resource on the Pacemaker Remote node instead of colocating the - * resource with the container resource. + /* Force the resource on the Pacemaker Remote node instead of + * colocating the resource with the container resource. */ GHashTableIter iter; node_t *node = NULL; @@ -1512,6 +1531,7 @@ native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) node->weight = -INFINITY; } } + } else { /* This resource is either a filler for a container that does NOT * represent a Pacemaker Remote node, or a Pacemaker Remote @@ -1545,15 +1565,6 @@ native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set) * or remote connection resources.*/ rsc_avoids_remote_nodes(rsc); } - - /* If this is a guest node's implicit remote connection, do not allow the - * guest resource to live on a Pacemaker Remote node, to avoid nesting - * remotes. However, allow bundles to run on remote nodes. 
- */ - if (rsc->is_remote_node && rsc->container - && is_not_set(rsc->flags, pe_rsc_allow_remote_remotes)) { - rsc_avoids_remote_nodes(rsc->container); - } } void -- 1.8.3.1 From af4f6a1cc13b92bba8109007a1fac809e0d80887 Mon Sep 17 00:00:00 2001 From: Ken Gaillot Date: Mon, 15 Oct 2018 12:21:14 -0500 Subject: [PATCH 7/9] Fix: scheduler: order guest pseudo-fencing properly after clean-up If the resource history of a guest node's container has been cleaned, we will schedule a (pseudo-)fence of the guest node, and a stop of the guest node's connection resource, but not a stop of the container (which appears already stopped). In this case, order the pseudo-fence after the connection stop, so we don't call remote_node_down() unless the container is really down (the connection stop will be avoided if the container is really up). --- pengine/allocate.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/pengine/allocate.c b/pengine/allocate.c index 569a4a5..e867368 100644 --- a/pengine/allocate.c +++ b/pengine/allocate.c @@ -1446,12 +1446,30 @@ fence_guest(pe_node_t *node, pe_action_t *done, pe_working_set_t *data_set) node->details->uname, stonith_op->id, container->id, stop->id); } else { - crm_info("Implying guest node %s is down (action %d) ", - node->details->uname, stonith_op->id); + /* If we're fencing the guest node but there's no stop for the guest + * resource, we must think the guest is already stopped. However, we may + * think so because its resource history was just cleaned. To avoid + * unnecessarily considering the guest node down if it's really up, + * order the pseudo-fencing after any stop of the connection resource, + * which will be ordered after any container (re-)probe. + */ + stop = find_first_action(node->details->remote_rsc->actions, NULL, + RSC_STOP, NULL); + + if (stop) { + order_actions(stop, stonith_op, pe_order_optional); + crm_info("Implying guest node %s is down (action %d) " + "after connection is stopped (action %d)", + node->details->uname, stonith_op->id, stop->id); + } else { + /* Not sure why we're fencing, but everything must already be + * cleanly stopped. 
+ */ + crm_info("Implying guest node %s is down (action %d) ", + node->details->uname, stonith_op->id); + } } - /* @TODO: Order pseudo-fence after any (optional) fence of guest's host */ - /* Order/imply other actions relative to pseudo-fence as with real fence */ stonith_constraints(node, stonith_op, data_set); if(done) { -- 1.8.3.1 From 4c771013389f395e16165312993597064d06d149 Mon Sep 17 00:00:00 2001 From: Ken Gaillot Date: Mon, 29 Oct 2018 14:56:47 -0500 Subject: [PATCH 8/9] Test: pengine: update regression tests for recent changes clear-failcount change: operations in generated graphs now consistently have XML_ATTR_TE_NOWAIT guest node change: insignificant change in op numbering due to newly added optional constraint --- pengine/test10/bug-5025-1.exp | 2 +- pengine/test10/bug-5025-3.exp | 2 +- pengine/test10/bug-5069-op-disabled.exp | 2 +- pengine/test10/bug-cl-5247.exp | 12 ++++++------ 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/pengine/test10/bug-5025-1.exp b/pengine/test10/bug-5025-1.exp index 053ece4..2a82e79 100644 --- a/pengine/test10/bug-5025-1.exp +++ b/pengine/test10/bug-5025-1.exp @@ -3,7 +3,7 @@ - + diff --git a/pengine/test10/bug-5025-3.exp b/pengine/test10/bug-5025-3.exp index eb2e2e6..9360ca7 100644 --- a/pengine/test10/bug-5025-3.exp +++ b/pengine/test10/bug-5025-3.exp @@ -12,7 +12,7 @@ - + diff --git a/pengine/test10/bug-5069-op-disabled.exp b/pengine/test10/bug-5069-op-disabled.exp index 9653af1..fffb8c4 100644 --- a/pengine/test10/bug-5069-op-disabled.exp +++ b/pengine/test10/bug-5069-op-disabled.exp @@ -3,7 +3,7 @@ - + diff --git a/pengine/test10/bug-cl-5247.exp b/pengine/test10/bug-cl-5247.exp index c21ed7d..5315360 100644 --- a/pengine/test10/bug-cl-5247.exp +++ b/pengine/test10/bug-cl-5247.exp @@ -451,7 +451,7 @@ - + @@ -464,7 +464,7 @@ - + @@ -477,7 +477,7 @@ - + @@ -534,7 +534,7 @@ - + @@ -564,7 +564,7 @@ - + @@ -644,7 +644,7 @@ - + -- 1.8.3.1 From 6ab7a2cb1ed28548706db3bc8c006cea3b605681 Mon Sep 17 00:00:00 2001 From: Ken Gaillot Date: Mon, 15 Oct 2018 11:24:46 -0500 Subject: [PATCH 9/9] Test: scheduler: order guest node connection recovery after container probe --- pengine/regression.sh | 1 + pengine/test10/guest-node-cleanup.dot | 64 ++++++ pengine/test10/guest-node-cleanup.exp | 319 ++++++++++++++++++++++++++++++ pengine/test10/guest-node-cleanup.scores | 81 ++++++++ pengine/test10/guest-node-cleanup.summary | 55 ++++++ pengine/test10/guest-node-cleanup.xml | 304 ++++++++++++++++++++++++++++ 6 files changed, 824 insertions(+) create mode 100644 pengine/test10/guest-node-cleanup.dot create mode 100644 pengine/test10/guest-node-cleanup.exp create mode 100644 pengine/test10/guest-node-cleanup.scores create mode 100644 pengine/test10/guest-node-cleanup.summary create mode 100644 pengine/test10/guest-node-cleanup.xml diff --git a/pengine/regression.sh b/pengine/regression.sh index deca1b6..ead5fd8 100755 --- a/pengine/regression.sh +++ b/pengine/regression.sh @@ -854,6 +854,7 @@ do_test whitebox-migrate1 "Migrate both container and connection resource" do_test whitebox-imply-stop-on-fence "imply stop action on container node rsc when host node is fenced" do_test whitebox-nested-group "Verify guest remote-node works nested in a group" do_test guest-node-host-dies "Verify guest node is recovered if host goes away" +do_test guest-node-cleanup "Order guest node connection recovery after container probe" echo "" do_test remote-startup-probes "Baremetal remote-node startup probes" diff --git a/pengine/test10/guest-node-cleanup.dot 
b/pengine/test10/guest-node-cleanup.dot new file mode 100644 index 0000000..45fe4be --- /dev/null +++ b/pengine/test10/guest-node-cleanup.dot @@ -0,0 +1,64 @@ +digraph "g" { +"all_stopped" -> "lxc1_start_0 rhel7-1" [ style = bold] +"all_stopped" [ style=bold color="green" fontcolor="orange"] +"container1_monitor_0 rhel7-1" -> "container1_start_0 rhel7-1" [ style = bold] +"container1_monitor_0 rhel7-1" -> "lxc1_stop_0 rhel7-1" [ style = bold] +"container1_monitor_0 rhel7-1" [ style=bold color="green" fontcolor="black"] +"container1_start_0 rhel7-1" -> "lxc-ms_promote_0 lxc1" [ style = bold] +"container1_start_0 rhel7-1" -> "lxc-ms_start_0 lxc1" [ style = bold] +"container1_start_0 rhel7-1" -> "lxc1_start_0 rhel7-1" [ style = bold] +"container1_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"] +"lxc-ms-master_demote_0" -> "lxc-ms-master_demoted_0" [ style = bold] +"lxc-ms-master_demote_0" -> "lxc-ms_demote_0 lxc1" [ style = bold] +"lxc-ms-master_demote_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms-master_demoted_0" -> "lxc-ms-master_promote_0" [ style = bold] +"lxc-ms-master_demoted_0" -> "lxc-ms-master_start_0" [ style = bold] +"lxc-ms-master_demoted_0" -> "lxc-ms-master_stop_0" [ style = bold] +"lxc-ms-master_demoted_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms-master_promote_0" -> "lxc-ms_promote_0 lxc1" [ style = bold] +"lxc-ms-master_promote_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms-master_promoted_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms-master_running_0" -> "lxc-ms-master_promote_0" [ style = bold] +"lxc-ms-master_running_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms-master_start_0" -> "lxc-ms-master_running_0" [ style = bold] +"lxc-ms-master_start_0" -> "lxc-ms_start_0 lxc1" [ style = bold] +"lxc-ms-master_start_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms-master_stop_0" -> "lxc-ms-master_stopped_0" [ style = bold] +"lxc-ms-master_stop_0" -> "lxc-ms_stop_0 lxc1" [ style = bold] +"lxc-ms-master_stop_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms-master_stopped_0" -> "lxc-ms-master_promote_0" [ style = bold] +"lxc-ms-master_stopped_0" -> "lxc-ms-master_start_0" [ style = bold] +"lxc-ms-master_stopped_0" [ style=bold color="green" fontcolor="orange"] +"lxc-ms_demote_0 lxc1" -> "lxc-ms-master_demoted_0" [ style = bold] +"lxc-ms_demote_0 lxc1" -> "lxc-ms_promote_0 lxc1" [ style = bold] +"lxc-ms_demote_0 lxc1" -> "lxc-ms_stop_0 lxc1" [ style = bold] +"lxc-ms_demote_0 lxc1" [ style=bold color="green" fontcolor="orange"] +"lxc-ms_promote_0 lxc1" -> "lxc-ms-master_promoted_0" [ style = bold] +"lxc-ms_promote_0 lxc1" [ style=bold color="green" fontcolor="black"] +"lxc-ms_start_0 lxc1" -> "lxc-ms-master_running_0" [ style = bold] +"lxc-ms_start_0 lxc1" -> "lxc-ms_promote_0 lxc1" [ style = bold] +"lxc-ms_start_0 lxc1" [ style=bold color="green" fontcolor="black"] +"lxc-ms_stop_0 lxc1" -> "all_stopped" [ style = bold] +"lxc-ms_stop_0 lxc1" -> "lxc-ms-master_stopped_0" [ style = bold] +"lxc-ms_stop_0 lxc1" -> "lxc-ms_start_0 lxc1" [ style = bold] +"lxc-ms_stop_0 lxc1" [ style=bold color="green" fontcolor="orange"] +"lxc1_monitor_30000 rhel7-1" [ style=bold color="green" fontcolor="black"] +"lxc1_start_0 rhel7-1" -> "lxc-ms_promote_0 lxc1" [ style = bold] +"lxc1_start_0 rhel7-1" -> "lxc-ms_start_0 lxc1" [ style = bold] +"lxc1_start_0 rhel7-1" -> "lxc1_monitor_30000 rhel7-1" [ style = bold] +"lxc1_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"] +"lxc1_stop_0 rhel7-1" -> 
"all_stopped" [ style = bold] +"lxc1_stop_0 rhel7-1" -> "lxc1_start_0 rhel7-1" [ style = bold] +"lxc1_stop_0 rhel7-1" -> "stonith 'reboot' lxc1" [ style = bold] +"lxc1_stop_0 rhel7-1" [ style=bold color="green" fontcolor="black"] +"stonith 'reboot' lxc1" -> "lxc-ms-master_stop_0" [ style = bold] +"stonith 'reboot' lxc1" -> "lxc-ms_demote_0 lxc1" [ style = bold] +"stonith 'reboot' lxc1" -> "lxc-ms_stop_0 lxc1" [ style = bold] +"stonith 'reboot' lxc1" -> "stonith_complete" [ style = bold] +"stonith 'reboot' lxc1" [ style=bold color="green" fontcolor="orange"] +"stonith_complete" -> "all_stopped" [ style = bold] +"stonith_complete" -> "container1_start_0 rhel7-1" [ style = bold] +"stonith_complete" -> "lxc-ms_promote_0 lxc1" [ style = bold] +"stonith_complete" -> "lxc-ms_start_0 lxc1" [ style = bold] +"stonith_complete" [ style=bold color="green" fontcolor="orange"] +} diff --git a/pengine/test10/guest-node-cleanup.exp b/pengine/test10/guest-node-cleanup.exp new file mode 100644 index 0000000..9503a03 --- /dev/null +++ b/pengine/test10/guest-node-cleanup.exp @@ -0,0 +1,319 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pengine/test10/guest-node-cleanup.scores b/pengine/test10/guest-node-cleanup.scores new file mode 100644 index 0000000..9bc8250 --- /dev/null +++ b/pengine/test10/guest-node-cleanup.scores @@ -0,0 +1,81 @@ +Allocation scores: +Using the original execution date of: 2018-10-15 16:02:04Z +clone_color: lxc-ms-master allocation score on lxc1: INFINITY +clone_color: lxc-ms-master allocation score on lxc2: INFINITY +clone_color: lxc-ms-master allocation score on rhel7-1: 0 +clone_color: lxc-ms-master allocation score on rhel7-2: 0 +clone_color: lxc-ms-master allocation score on rhel7-3: 0 +clone_color: lxc-ms-master allocation score on rhel7-4: 0 +clone_color: lxc-ms-master allocation score on rhel7-5: 0 +clone_color: lxc-ms:0 allocation score on lxc1: INFINITY +clone_color: lxc-ms:0 allocation score on lxc2: INFINITY +clone_color: lxc-ms:0 allocation score on rhel7-1: 0 +clone_color: lxc-ms:0 allocation score on rhel7-2: 0 +clone_color: lxc-ms:0 allocation score on rhel7-3: 0 +clone_color: lxc-ms:0 allocation score on rhel7-4: 0 +clone_color: lxc-ms:0 allocation score on rhel7-5: 0 +clone_color: lxc-ms:1 allocation score on lxc1: INFINITY +clone_color: lxc-ms:1 allocation score on lxc2: INFINITY +clone_color: lxc-ms:1 allocation score on rhel7-1: 0 +clone_color: lxc-ms:1 allocation score on rhel7-2: 0 +clone_color: lxc-ms:1 allocation score on rhel7-3: 0 +clone_color: lxc-ms:1 allocation score on rhel7-4: 0 +clone_color: lxc-ms:1 allocation score on rhel7-5: 0 +lxc-ms:0 promotion score on lxc2: INFINITY +lxc-ms:1 promotion score on lxc1: INFINITY +native_color: Fencing allocation score on lxc1: -INFINITY +native_color: Fencing allocation score on lxc2: -INFINITY +native_color: Fencing allocation score on rhel7-1: 0 +native_color: Fencing allocation score on rhel7-2: 0 +native_color: 
Fencing allocation score on rhel7-3: 0 +native_color: Fencing allocation score on rhel7-4: 0 +native_color: Fencing allocation score on rhel7-5: 0 +native_color: FencingPass allocation score on lxc1: -INFINITY +native_color: FencingPass allocation score on lxc2: -INFINITY +native_color: FencingPass allocation score on rhel7-1: 0 +native_color: FencingPass allocation score on rhel7-2: 0 +native_color: FencingPass allocation score on rhel7-3: 0 +native_color: FencingPass allocation score on rhel7-4: 0 +native_color: FencingPass allocation score on rhel7-5: 0 +native_color: container1 allocation score on lxc1: -INFINITY +native_color: container1 allocation score on lxc2: -INFINITY +native_color: container1 allocation score on rhel7-1: INFINITY +native_color: container1 allocation score on rhel7-2: 0 +native_color: container1 allocation score on rhel7-3: 0 +native_color: container1 allocation score on rhel7-4: 0 +native_color: container1 allocation score on rhel7-5: 0 +native_color: container2 allocation score on lxc1: -INFINITY +native_color: container2 allocation score on lxc2: -INFINITY +native_color: container2 allocation score on rhel7-1: INFINITY +native_color: container2 allocation score on rhel7-2: 0 +native_color: container2 allocation score on rhel7-3: 0 +native_color: container2 allocation score on rhel7-4: 0 +native_color: container2 allocation score on rhel7-5: 0 +native_color: lxc-ms:0 allocation score on lxc1: INFINITY +native_color: lxc-ms:0 allocation score on lxc2: INFINITY +native_color: lxc-ms:0 allocation score on rhel7-1: 0 +native_color: lxc-ms:0 allocation score on rhel7-2: 0 +native_color: lxc-ms:0 allocation score on rhel7-3: 0 +native_color: lxc-ms:0 allocation score on rhel7-4: 0 +native_color: lxc-ms:0 allocation score on rhel7-5: 0 +native_color: lxc-ms:1 allocation score on lxc1: INFINITY +native_color: lxc-ms:1 allocation score on lxc2: -INFINITY +native_color: lxc-ms:1 allocation score on rhel7-1: 0 +native_color: lxc-ms:1 allocation score on rhel7-2: 0 +native_color: lxc-ms:1 allocation score on rhel7-3: 0 +native_color: lxc-ms:1 allocation score on rhel7-4: 0 +native_color: lxc-ms:1 allocation score on rhel7-5: 0 +native_color: lxc1 allocation score on lxc1: -INFINITY +native_color: lxc1 allocation score on lxc2: -INFINITY +native_color: lxc1 allocation score on rhel7-1: 0 +native_color: lxc1 allocation score on rhel7-2: -INFINITY +native_color: lxc1 allocation score on rhel7-3: -INFINITY +native_color: lxc1 allocation score on rhel7-4: -INFINITY +native_color: lxc1 allocation score on rhel7-5: -INFINITY +native_color: lxc2 allocation score on lxc1: -INFINITY +native_color: lxc2 allocation score on lxc2: -INFINITY +native_color: lxc2 allocation score on rhel7-1: 0 +native_color: lxc2 allocation score on rhel7-2: -INFINITY +native_color: lxc2 allocation score on rhel7-3: -INFINITY +native_color: lxc2 allocation score on rhel7-4: -INFINITY +native_color: lxc2 allocation score on rhel7-5: -INFINITY diff --git a/pengine/test10/guest-node-cleanup.summary b/pengine/test10/guest-node-cleanup.summary new file mode 100644 index 0000000..6378f48 --- /dev/null +++ b/pengine/test10/guest-node-cleanup.summary @@ -0,0 +1,55 @@ +Using the original execution date of: 2018-10-15 16:02:04Z + +Current cluster status: +Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ] +Containers: [ lxc2:container2 ] + + Fencing (stonith:fence_xvm): Started rhel7-2 + FencingPass (stonith:fence_dummy): Started rhel7-3 + container1 (ocf::heartbeat:VirtualDomain): FAILED + container2 
(ocf::heartbeat:VirtualDomain): Started rhel7-1
+ Master/Slave Set: lxc-ms-master [lxc-ms]
+     Slaves: [ lxc2 ]
+     Stopped: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
+
+Transition Summary:
+ * Fence (reboot) lxc1 (resource: container1) 'guest is unclean'
+ * Start      container1 ( rhel7-1 )
+ * Recover    lxc-ms:1   ( Master lxc1 )
+ * Restart    lxc1       ( rhel7-1 ) due to required container1 start
+
+Executing cluster transition:
+ * Resource action: container1 monitor on rhel7-1
+ * Pseudo action:   lxc-ms-master_demote_0
+ * Resource action: lxc1 stop on rhel7-1
+ * Pseudo action:   stonith-lxc1-reboot on lxc1
+ * Pseudo action:   stonith_complete
+ * Resource action: container1 start on rhel7-1
+ * Pseudo action:   lxc-ms_demote_0
+ * Pseudo action:   lxc-ms-master_demoted_0
+ * Pseudo action:   lxc-ms-master_stop_0
+ * Pseudo action:   lxc-ms_stop_0
+ * Pseudo action:   lxc-ms-master_stopped_0
+ * Pseudo action:   lxc-ms-master_start_0
+ * Pseudo action:   all_stopped
+ * Resource action: lxc1 start on rhel7-1
+ * Resource action: lxc1 monitor=30000 on rhel7-1
+ * Resource action: lxc-ms start on lxc1
+ * Pseudo action:   lxc-ms-master_running_0
+ * Pseudo action:   lxc-ms-master_promote_0
+ * Resource action: lxc-ms promote on lxc1
+ * Pseudo action:   lxc-ms-master_promoted_0
+Using the original execution date of: 2018-10-15 16:02:04Z
+
+Revised cluster status:
+Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
+Containers: [ lxc1:container1 lxc2:container2 ]
+
+ Fencing (stonith:fence_xvm): Started rhel7-2
+ FencingPass (stonith:fence_dummy): Started rhel7-3
+ container1 (ocf::heartbeat:VirtualDomain): Started rhel7-1
+ container2 (ocf::heartbeat:VirtualDomain): Started rhel7-1
+ Master/Slave Set: lxc-ms-master [lxc-ms]
+     Masters: [ lxc1 ]
+     Slaves: [ lxc2 ]
+
diff --git a/pengine/test10/guest-node-cleanup.xml b/pengine/test10/guest-node-cleanup.xml
new file mode 100644
index 0000000..35835bc
--- /dev/null
+++ b/pengine/test10/guest-node-cleanup.xml
@@ -0,0 +1,304 @@
    [XML content of the new test CIB lost in extraction]
-- 
1.8.3.1