From e953591a9796edebd4796c344df0eddcbc7a2dff Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Mon, 30 Jan 2023 16:34:32 -0600
Subject: [PATCH 01/14] Refactor: scheduler: drop unneeded arguments from
 process_rsc_state()

migrate_op has been unused since at least 2011
---
 lib/pengine/unpack.c | 36 +++++++++++++++---------------------
 1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 5fcba3b..9524def 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1963,8 +1963,7 @@ process_orphan_resource(xmlNode * rsc_entry, pe_node_t * node, pe_working_set_t
 
 static void
 process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
-                  enum action_fail_response on_fail,
-                  xmlNode * migrate_op, pe_working_set_t * data_set)
+                  enum action_fail_response on_fail)
 {
     pe_node_t *tmpnode = NULL;
     char *reason = NULL;
@@ -2016,7 +2015,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             should_fence = TRUE;
 
-        } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+        } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             if (pe__is_remote_node(node) && node->details->remote_rsc
                 && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
 
@@ -2039,7 +2038,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
             if (reason == NULL) {
                 reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
             }
-            pe_fence_node(data_set, node, reason, FALSE);
+            pe_fence_node(rsc->cluster, node, reason, FALSE);
         }
         free(reason);
     }
@@ -2069,7 +2068,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
              * but also mark the node as unclean
              */
             reason = crm_strdup_printf("%s failed there", rsc->id);
-            pe_fence_node(data_set, node, reason, FALSE);
+            pe_fence_node(rsc->cluster, node, reason, FALSE);
             free(reason);
             break;
 
@@ -2090,7 +2089,8 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
             /* make sure it comes up somewhere else
              * or not at all
              */
-            resource_location(rsc, node, -INFINITY, "__action_migration_auto__", data_set);
+            resource_location(rsc, node, -INFINITY, "__action_migration_auto__",
+                              rsc->cluster);
             break;
 
         case action_fail_stop:
@@ -2112,8 +2112,8 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
                  * container is running yet, so remember it and add a stop
                  * action for it later.
                  */
-                data_set->stop_needed = g_list_prepend(data_set->stop_needed,
-                                                       rsc->container);
+                rsc->cluster->stop_needed =
+                    g_list_prepend(rsc->cluster->stop_needed, rsc->container);
             } else if (rsc->container) {
                 stop_action(rsc->container, node, FALSE);
             } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
@@ -2123,10 +2123,10 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
 
         case action_fail_reset_remote:
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
-            if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+            if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
                 tmpnode = NULL;
                 if (rsc->is_remote_node) {
-                    tmpnode = pe_find_node(data_set->nodes, rsc->id);
+                    tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
                 }
                 if (tmpnode &&
                     pe__is_remote_node(tmpnode) &&
@@ -2135,7 +2135,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
                     /* The remote connection resource failed in a way that
                      * should result in fencing the remote node.
                      */
-                    pe_fence_node(data_set, tmpnode,
+                    pe_fence_node(rsc->cluster, tmpnode,
                                   "remote connection is unrecoverable", FALSE);
                 }
             }
@@ -2158,7 +2158,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
      * result in a fencing operation regardless if we're going to attempt to
      * reconnect to the remote-node in this transition or not. */
     if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
-        tmpnode = pe_find_node(data_set->nodes, rsc->id);
+        tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
         if (tmpnode && tmpnode->details->unclean) {
             tmpnode->details->unseen = FALSE;
         }
@@ -2177,7 +2177,8 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
         }
     }
 
-    native_add_running(rsc, node, data_set, (save_on_fail != action_fail_ignore));
+    native_add_running(rsc, node, rsc->cluster,
+                       (save_on_fail != action_fail_ignore));
     switch (on_fail) {
         case action_fail_ignore:
             break;
@@ -2376,14 +2377,12 @@ unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource,
     int start_index = -1;
     enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
 
-    const char *task = NULL;
     const char *rsc_id = ID(lrm_resource);
 
     pe_resource_t *rsc = NULL;
     GList *op_list = NULL;
     GList *sorted_op_list = NULL;
 
-    xmlNode *migrate_op = NULL;
     xmlNode *rsc_op = NULL;
     xmlNode *last_failure = NULL;
 
@@ -2437,11 +2436,6 @@ unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource,
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
-        task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
-        if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
-            migrate_op = rsc_op;
-        }
-
         unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail, data_set);
     }
 
@@ -2452,7 +2446,7 @@ unpack_lrm_resource(pe_node_t *node, xmlNode *lrm_resource,
     /* no need to free the contents */
     g_list_free(sorted_op_list);
 
-    process_rsc_state(rsc, node, on_fail, migrate_op, data_set);
+    process_rsc_state(rsc, node, on_fail);
 
     if (get_target_role(rsc, &req_role)) {
        if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
--
2.31.1

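Note on the refactor above: both dropped parameters are recoverable through the
resource itself, because pe_resource_t carries a back-pointer to its cluster
working set (rsc->cluster). A minimal standalone C sketch of that back-pointer
pattern follows; all names in it are invented for illustration and are not
Pacemaker's API.

    #include <stdio.h>

    struct cluster {              /* stands in for pe_working_set_t */
        unsigned long flags;
    };

    struct resource {             /* stands in for pe_resource_t */
        const char *id;
        struct cluster *cluster;  /* back-pointer, like rsc->cluster */
    };

    /* Before: the helper needs the context passed explicitly */
    static void process_old(struct resource *rsc, struct cluster *data_set) {
        printf("%s: flags=%lx\n", rsc->id, data_set->flags);
    }

    /* After: the helper reaches the context through the back-pointer, so
     * the parameter (and every caller that had to supply it) goes away. */
    static void process_new(struct resource *rsc) {
        printf("%s: flags=%lx\n", rsc->id, rsc->cluster->flags);
    }

    int main(void) {
        struct cluster cs = { 0x10 };
        struct resource rsc = { "rscA", &cs };

        process_old(&rsc, &cs);
        process_new(&rsc);
        return 0;
    }

The trade-off is an extra pointer per object in exchange for shorter call
chains; it works here because a resource never outlives its working set.
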
From 6f4e34cccc4864961d2020a2dd547450ac53a44e Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 1 Feb 2023 16:30:20 -0600
Subject: [PATCH 02/14] Log: scheduler: improve trace logs when unpacking
 resource history

---
 lib/pengine/unpack.c | 112 +++++++++++++++++++++++++++----------------
 1 file changed, 71 insertions(+), 41 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 9524def..b7b2873 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -3363,6 +3363,24 @@ check_recoverable(pe_resource_t *rsc, pe_node_t *node, const char *task,
     pe__set_resource_flags(rsc, pe_rsc_block);
 }
 
+/*!
+ * \internal
+ * \brief Update an integer value and why
+ *
+ * \param[in,out] i       Pointer to integer to update
+ * \param[in,out] why     Where to store reason for update
+ * \param[in]     value   New value
+ * \param[in,out] reason  Description of why value was changed
+ */
+static inline void
+remap_because(int *i, const char **why, int value, const char *reason)
+{
+    if (*i != value) {
+        *i = value;
+        *why = reason;
+    }
+}
+
 /*!
  * \internal
  * \brief Remap informational monitor results and operation status
@@ -3393,29 +3411,34 @@ check_recoverable(pe_resource_t *rsc, pe_node_t *node, const char *task,
 static void
 remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
                 pe_working_set_t *data_set, enum action_fail_response *on_fail,
-                int target_rc, int *rc, int *status) {
+                int target_rc, int *rc, int *status)
+{
     bool is_probe = false;
+    int orig_exit_status = *rc;
+    int orig_exec_status = *status;
+    const char *why = NULL;
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *key = get_op_key(xml_op);
     const char *exit_reason = crm_element_value(xml_op,
                                                 XML_LRM_ATTR_EXIT_REASON);
 
     if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_none)) {
-        int remapped_rc = pcmk__effective_rc(*rc);
-
-        if (*rc != remapped_rc) {
-            crm_trace("Remapping monitor result %d to %d", *rc, remapped_rc);
+        // Remap degraded results to their usual counterparts
+        *rc = pcmk__effective_rc(*rc);
+        if (*rc != orig_exit_status) {
+            why = "degraded monitor result";
             if (!node->details->shutdown || node->details->online) {
                 record_failed_op(xml_op, node, rsc, data_set);
             }
-
-            *rc = remapped_rc;
         }
     }
 
     if (!pe_rsc_is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
-        *status = PCMK_EXEC_DONE;
-        *rc = PCMK_OCF_NOT_RUNNING;
+        if ((*status != PCMK_EXEC_DONE) || (*rc != PCMK_OCF_NOT_RUNNING)) {
+            *status = PCMK_EXEC_DONE;
+            *rc = PCMK_OCF_NOT_RUNNING;
+            why = "irrelevant probe result";
+        }
     }
 
     /* If the executor reported an operation status of anything but done or
@@ -3423,22 +3446,19 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
      * it should be treated as a failure or not, because we know the expected
      * result.
      */
-    if (*status != PCMK_EXEC_DONE && *status != PCMK_EXEC_ERROR) {
-        return;
+    switch (*status) {
+        case PCMK_EXEC_DONE:
+        case PCMK_EXEC_ERROR:
+            break;
+        default:
+            goto remap_done;
     }
 
-    CRM_ASSERT(rsc);
-    CRM_CHECK(task != NULL,
-              *status = PCMK_EXEC_ERROR; return);
-
-    *status = PCMK_EXEC_DONE;
-
     if (exit_reason == NULL) {
         exit_reason = "";
     }
 
     is_probe = pcmk_xe_is_probe(xml_op);
-
     if (is_probe) {
         task = "probe";
     }
@@ -3452,12 +3472,15 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
          * those versions or processing of saved CIB files from those versions,
          * so we do not need to care much about this case.
          */
-        *status = PCMK_EXEC_ERROR;
+        remap_because(status, &why, PCMK_EXEC_ERROR, "obsolete history format");
         crm_warn("Expected result not found for %s on %s (corrupt or obsolete CIB?)",
                  key, pe__node_name(node));
 
-    } else if (target_rc != *rc) {
-        *status = PCMK_EXEC_ERROR;
+    } else if (*rc == target_rc) {
+        remap_because(status, &why, PCMK_EXEC_DONE, "expected result");
+
+    } else {
+        remap_because(status, &why, PCMK_EXEC_ERROR, "unexpected result");
         pe_rsc_debug(rsc, "%s on %s: expected %d (%s), got %d (%s%s%s)",
                      key, pe__node_name(node),
                      target_rc, services_ocf_exitcode_str(target_rc),
@@ -3468,7 +3491,7 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
     switch (*rc) {
         case PCMK_OCF_OK:
             if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) {
-                *status = PCMK_EXEC_DONE;
+                remap_because(status, &why,PCMK_EXEC_DONE, "probe");
                 pe_rsc_info(rsc, "Probe found %s active on %s at %s",
                             rsc->id, pe__node_name(node),
                             last_change_str(xml_op));
@@ -3479,7 +3502,7 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
             if (is_probe || (target_rc == *rc)
                 || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
 
-                *status = PCMK_EXEC_DONE;
+                remap_because(status, &why, PCMK_EXEC_DONE, "exit status");
                 rsc->role = RSC_ROLE_STOPPED;
 
                 /* clear any previous failure actions */
@@ -3490,7 +3513,7 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
 
         case PCMK_OCF_RUNNING_PROMOTED:
             if (is_probe && (*rc != target_rc)) {
-                *status = PCMK_EXEC_DONE;
+                remap_because(status, &why, PCMK_EXEC_DONE, "probe");
                 pe_rsc_info(rsc,
                             "Probe found %s active and promoted on %s at %s",
                             rsc->id, pe__node_name(node),
@@ -3502,11 +3525,11 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
         case PCMK_OCF_DEGRADED_PROMOTED:
         case PCMK_OCF_FAILED_PROMOTED:
             rsc->role = RSC_ROLE_PROMOTED;
-            *status = PCMK_EXEC_ERROR;
+            remap_because(status, &why, PCMK_EXEC_ERROR, "exit status");
             break;
 
         case PCMK_OCF_NOT_CONFIGURED:
-            *status = PCMK_EXEC_ERROR_FATAL;
+            remap_because(status, &why, PCMK_EXEC_ERROR_FATAL, "exit status");
            break;
 
         case PCMK_OCF_UNIMPLEMENT_FEATURE:
@@ -3517,9 +3540,11 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
 
                 if (interval_ms == 0) {
                     check_recoverable(rsc, node, task, *rc, xml_op);
-                    *status = PCMK_EXEC_ERROR_HARD;
+                    remap_because(status, &why, PCMK_EXEC_ERROR_HARD,
+                                  "exit status");
                 } else {
-                    *status = PCMK_EXEC_NOT_SUPPORTED;
+                    remap_because(status, &why, PCMK_EXEC_NOT_SUPPORTED,
+                                  "exit status");
                 }
             }
             break;
@@ -3528,7 +3553,7 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
         case PCMK_OCF_INVALID_PARAM:
         case PCMK_OCF_INSUFFICIENT_PRIV:
             check_recoverable(rsc, node, task, *rc, xml_op);
-            *status = PCMK_EXEC_ERROR_HARD;
+            remap_because(status, &why, PCMK_EXEC_ERROR_HARD, "exit status");
            break;
 
         default:
@@ -3537,13 +3562,21 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
                          "on %s at %s as failure",
                          *rc, task, rsc->id, pe__node_name(node),
                          last_change_str(xml_op));
-                *status = PCMK_EXEC_ERROR;
+                remap_because(status, &why, PCMK_EXEC_ERROR,
+                              "unknown exit status");
             }
             break;
     }
 
-    pe_rsc_trace(rsc, "Remapped %s status to '%s'",
-                 key, pcmk_exec_status_str(*status));
+remap_done:
+    if (why != NULL) {
+        pe_rsc_trace(rsc,
+                     "Remapped %s result from [%s: %s] to [%s: %s] "
+                     "because of %s",
+                     key, pcmk_exec_status_str(orig_exec_status),
+                     crm_exit_str(orig_exit_status),
+                     pcmk_exec_status_str(*status), crm_exit_str(*rc), why);
+    }
 }
 
 // return TRUE if start or monitor last failure but parameters changed
@@ -3947,9 +3980,9 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
         parent = uber_parent(rsc);
     }
 
-    pe_rsc_trace(rsc, "Unpacking task %s/%s (call_id=%d, status=%d, rc=%d) on %s (role=%s)",
-                 task_key, task, task_id, status, rc, pe__node_name(node),
-                 role2text(rsc->role));
+    pe_rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
+                 ID(xml_op), task, task_id, pe__node_name(node),
+                 pcmk_exec_status_str(status), crm_exit_str(rc));
 
     if (node->details->unclean) {
         pe_rsc_trace(rsc,
@@ -4077,9 +4110,6 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
             goto done;
 
         case PCMK_EXEC_DONE:
-            pe_rsc_trace(rsc, "%s of %s on %s completed at %s " CRM_XS " id=%s",
-                         task, rsc->id, pe__node_name(node),
-                         last_change_str(xml_op), ID(xml_op));
             update_resource_state(rsc, node, xml_op, task, rc, *last_failure, on_fail, data_set);
             goto done;
 
@@ -4175,9 +4205,9 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
     }
 
 done:
-    pe_rsc_trace(rsc, "Resource %s after %s: role=%s, next=%s",
-                 rsc->id, task, role2text(rsc->role),
-                 role2text(rsc->next_role));
+    pe_rsc_trace(rsc, "%s role on %s after %s is %s (next %s)",
+                 rsc->id, pe__node_name(node), ID(xml_op),
+                 role2text(rsc->role), role2text(rsc->next_role));
 }
 
 static void
--
2.31.1

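The remap_because() helper above is small enough to restate outside the patch.
Here is a self-contained sketch of the idiom: every remapping goes through one
helper that records why the value changed, so a single trace message at the end
can report the original value, the final value, and the last reason, and stay
silent when nothing changed. The helper body is copied from the patch; the
surrounding driver and the status values are invented for illustration.

    #include <stdio.h>

    static inline void
    remap_because(int *i, const char **why, int value, const char *reason)
    {
        if (*i != value) {
            *i = value;
            *why = reason;
        }
    }

    int main(void) {
        int status = 1;                 /* pretend 1 means "error" */
        int orig_status = status;
        const char *why = NULL;

        remap_because(&status, &why, 1, "expected result"); /* same value: no-op */
        remap_because(&status, &why, 0, "probe");           /* 1 -> 0, why is set */

        if (why != NULL) {
            printf("remapped status %d -> %d because of %s\n",
                   orig_status, status, why);
        }
        return 0;
    }

Because the helper only records a reason on an actual change, the trace log
never claims a remap that did not happen, which was a weakness of the old
unconditional "Remapped ... status" message.
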
From 5a1d2a3ba58fa73225433dab40cee0a6e0ef9bda Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Wed, 1 Feb 2023 12:08:55 -0600
Subject: [PATCH 03/14] Low: scheduler: improve migration history validation

Instead of a simple CRM_CHECK(), functionize parsing the source and target node
names from a migration action's resource history entry. This reduces
duplication and allows us to log more helpful errors.

Also, CRM_CHECK() tries to dump core for debugging, and that's not helpful for
corrupted CIB entries.
---
 lib/pengine/unpack.c | 87 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 75 insertions(+), 12 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index b7b2873..cd1b038 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2786,6 +2786,60 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
            || monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
                                         data_set);
 }
+
+/*!
+ * \internal
+ * \brief Parse migration source and target node names from history entry
+ *
+ * \param[in] entry        Resource history entry for a migration action
+ * \param[in] source_node  If not NULL, source must match this node
+ * \param[in] target_node  If not NULL, target must match this node
+ * \param[out] source_name  Where to store migration source node name
+ * \param[out] target_name  Where to store migration target node name
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
+                         const pe_node_t *target_node,
+                         const char **source_name, const char **target_name)
+{
+    const char *id = ID(entry);
+
+    if (id == NULL) {
+        crm_err("Ignoring resource history entry without ID");
+        return pcmk_rc_unpack_error;
+    }
+
+    *source_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_SOURCE);
+    *target_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_TARGET);
+    if ((*source_name == NULL) || (*target_name == NULL)) {
+        crm_err("Ignoring resource history entry %s without "
+                XML_LRM_ATTR_MIGRATE_SOURCE " and " XML_LRM_ATTR_MIGRATE_TARGET,
+                id);
+        return pcmk_rc_unpack_error;
+    }
+
+    if ((source_node != NULL)
+        && !pcmk__str_eq(*source_name, source_node->details->uname,
+                         pcmk__str_casei|pcmk__str_null_matches)) {
+        crm_err("Ignoring resource history entry %s because "
+                XML_LRM_ATTR_MIGRATE_SOURCE "='%s' does not match %s",
+                id, pcmk__s(*source_name, ""), pe__node_name(source_node));
+        return pcmk_rc_unpack_error;
+    }
+
+    if ((target_node != NULL)
+        && !pcmk__str_eq(*target_name, target_node->details->uname,
+                         pcmk__str_casei|pcmk__str_null_matches)) {
+        crm_err("Ignoring resource history entry %s because "
+                XML_LRM_ATTR_MIGRATE_TARGET "='%s' does not match %s",
+                id, pcmk__s(*target_name, ""), pe__node_name(target_node));
+        return pcmk_rc_unpack_error;
+    }
+
+    return pcmk_rc_ok;
+}
 
 static void
 unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
@@ -2834,13 +2888,16 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
     pe_node_t *target_node = NULL;
     pe_node_t *source_node = NULL;
     xmlNode *migrate_from = NULL;
-    const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
-    const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
+    const char *source = NULL;
+    const char *target = NULL;
     bool source_newer_op = false;
     bool target_newer_state = false;
 
-    // Sanity check
-    CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
+    // Get source and target node names from XML
+    if (get_migration_node_names(xml_op, node, NULL, &source,
+                                 &target) != pcmk_rc_ok) {
+        return;
+    }
 
     /* If there's any newer non-monitor operation on the source, this migrate_to
      * potentially no longer matters for the source.
@@ -2949,11 +3006,14 @@ unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                           pe_working_set_t *data_set)
 {
     xmlNode *target_migrate_from = NULL;
-    const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
-    const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
+    const char *source = NULL;
+    const char *target = NULL;
 
-    // Sanity check
-    CRM_CHECK(source && target && !strcmp(source, node->details->uname), return);
+    // Get source and target node names from XML
+    if (get_migration_node_names(xml_op, node, NULL, &source,
+                                 &target) != pcmk_rc_ok) {
+        return;
+    }
 
     /* If a migration failed, we have to assume the resource is active. Clones
      * are not allowed to migrate, so role can't be promoted.
@@ -3001,11 +3061,14 @@ unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node,
                             xmlNode *xml_op, pe_working_set_t *data_set)
 {
     xmlNode *source_migrate_to = NULL;
-    const char *source = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_SOURCE);
-    const char *target = crm_element_value(xml_op, XML_LRM_ATTR_MIGRATE_TARGET);
+    const char *source = NULL;
+    const char *target = NULL;
 
-    // Sanity check
-    CRM_CHECK(source && target && !strcmp(target, node->details->uname), return);
+    // Get source and target node names from XML
+    if (get_migration_node_names(xml_op, NULL, node, &source,
+                                 &target) != pcmk_rc_ok) {
+        return;
+    }
 
     /* If a migration failed, we have to assume the resource is active. Clones
      * are not allowed to migrate, so role can't be promoted.
--
2.31.1

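A standalone sketch of the validation style this patch moves to: validate each
field, log a specific error, and return a code the caller can act on, instead
of an assertion macro that may dump core on corrupt input. Names, messages,
and return codes below are illustrative, not Pacemaker's API.

    #include <stdio.h>
    #include <string.h>

    enum { RC_OK = 0, RC_UNPACK_ERROR = 1 };

    /* Validate a (source, target) pair from an untrusted record; reject
     * rather than assert, so one bad entry cannot crash the whole unpack. */
    static int
    parse_migration_nodes(const char *id, const char *source, const char *target,
                          const char *required_source,
                          const char **source_out, const char **target_out)
    {
        if (id == NULL) {
            fprintf(stderr, "Ignoring history entry without ID\n");
            return RC_UNPACK_ERROR;
        }
        if ((source == NULL) || (target == NULL)) {
            fprintf(stderr, "Ignoring entry %s without source and target\n", id);
            return RC_UNPACK_ERROR;
        }
        if ((required_source != NULL) && (strcmp(source, required_source) != 0)) {
            fprintf(stderr, "Ignoring entry %s: source '%s' does not match %s\n",
                    id, source, required_source);
            return RC_UNPACK_ERROR;
        }
        *source_out = source;
        *target_out = target;
        return RC_OK;
    }

    int main(void) {
        const char *src = NULL, *tgt = NULL;

        if (parse_migration_nodes("op1", "node1", "node2", "node1",
                                  &src, &tgt) == RC_OK) {
            printf("migration %s -> %s\n", src, tgt);
        }
        /* Bad entry: wrong source node; logged and skipped, not fatal */
        parse_migration_nodes("op2", "node3", "node2", "node1", &src, &tgt);
        return 0;
    }
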
From 5139e5369769e733b05bc28940d3dccb4f7fca95 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 31 Jan 2023 14:30:16 -0600
Subject: [PATCH 04/14] Refactor: scheduler: functionize adding a dangling
 migration

... for code isolation and readability
---
 lib/pengine/unpack.c | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index cd1b038..fa7c2cc 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2841,6 +2841,28 @@ get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
     return pcmk_rc_ok;
 }
 
+/*
+ * \internal
+ * \brief Add a migration source to a resource's list of dangling migrations
+ *
+ * If the migrate_to and migrate_from actions in a live migration both
+ * succeeded, but there is no stop on the source, the migration is considered
+ * "dangling." Add the source to the resource's dangling migration list, which
+ * will be used to schedule a stop on the source without affecting the target.
+ *
+ * \param[in,out] rsc   Resource involved in migration
+ * \param[in]     node  Migration source
+ */
+static void
+add_dangling_migration(pe_resource_t *rsc, const pe_node_t *node)
+{
+    pe_rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
+                 rsc->id, pe__node_name(node));
+    rsc->role = RSC_ROLE_STOPPED;
+    rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
+                                              (gpointer) node);
+}
+
 static void
 unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                           pe_working_set_t *data_set)
@@ -2941,14 +2963,7 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
 
     if (migrate_from && from_rc == PCMK_OCF_OK
         && (from_status == PCMK_EXEC_DONE)) {
-        /* The migrate_to and migrate_from both succeeded, so mark the migration
-         * as "dangling". This will be used to schedule a stop action on the
-         * source without affecting the target.
-         */
-        pe_rsc_trace(rsc, "Detected dangling migration op: %s on %s", ID(xml_op),
-                     source);
-        rsc->role = RSC_ROLE_STOPPED;
-        rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations, node);
+        add_dangling_migration(rsc, node);
 
     } else if (migrate_from && (from_status != PCMK_EXEC_PENDING)) { // Failed
         /* If the resource has newer state on the target, this migrate_to no
--
2.31.1

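For readers unfamiliar with GLib: add_dangling_migration() relies on
g_list_prepend(), which returns the new list head and so must be assigned back
to the stored list. A small generic GLib example of the idiom (not Pacemaker
code; the node names are made up):

    /* Compile with: gcc demo.c $(pkg-config --cflags --libs glib-2.0) */
    #include <glib.h>
    #include <stdio.h>

    int main(void) {
        GList *dangling = NULL;   /* like rsc->dangling_migrations */

        /* Prepend is O(1); the data pointers are borrowed, not copied */
        dangling = g_list_prepend(dangling, (gpointer) "node1");
        dangling = g_list_prepend(dangling, (gpointer) "node2");

        for (GList *iter = dangling; iter != NULL; iter = iter->next) {
            printf("stop needed on %s\n", (const char *) iter->data);
        }
        g_list_free(dangling);    /* frees the list cells, not the data */
        return 0;
    }

This is also why the patch casts the node to (gpointer): the list stores
borrowed pointers to nodes owned by the working set.
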
From da71c04463d31338dd5da54d1d48b53e413716dc Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 31 Jan 2023 16:57:55 -0600
Subject: [PATCH 05/14] Refactor: scheduler: check for dangling migration
 before setting role

Previously, unpack_migrate_to_success() set rsc->role = RSC_ROLE_STARTED
then checked for dangling migration, which would reset it to RSC_ROLE_STOPPED.

For clarity, do the dangling migration check first.
---
 lib/pengine/unpack.c | 47 ++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 21 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index fa7c2cc..b858b59 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2905,8 +2905,8 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
      * migration is considered to be "dangling". Schedule a stop on the source
      * in this case.
      */
-    int from_rc = 0;
-    int from_status = 0;
+    int from_rc = PCMK_OCF_OK;
+    int from_status = PCMK_EXEC_PENDING;
     pe_node_t *target_node = NULL;
     pe_node_t *source_node = NULL;
     xmlNode *migrate_from = NULL;
@@ -2930,12 +2930,17 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
     // Check whether there was a migrate_from action on the target
     migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
                                source, -1, data_set);
-
-    /* Even if there's a newer non-monitor operation on the source, we still
-     * need to check how this migrate_to might matter for the target.
-     */
-    if (source_newer_op && migrate_from) {
-        return;
+    if (migrate_from != NULL) {
+        if (source_newer_op) {
+            /* There's a newer non-monitor operation on the source and a
+             * migrate_from on the target, so this migrate_to is irrelevant to
+             * the resource's state.
+             */
+            return;
+        }
+        crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
+        crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS,
+                              &from_status);
     }
 
     /* If the resource has newer state on the target after the migration
@@ -2948,24 +2953,24 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
         return;
     }
 
-    // Clones are not allowed to migrate, so role can't be promoted
+    /* Check for dangling migration (migrate_from succeeded but stop not done).
+     * We know there's no stop because we already returned if the target has a
+     * migrate_from and the source has any newer non-monitor operation.
+     */
+    if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) {
+        add_dangling_migration(rsc, node);
+        return;
+    }
+
+    /* Without newer state, this migrate_to implies the resource is active.
+     * (Clones are not allowed to migrate, so role can't be promoted.)
+     */
     rsc->role = RSC_ROLE_STARTED;
 
     target_node = pe_find_node(data_set->nodes, target);
     source_node = pe_find_node(data_set->nodes, source);
 
-    if (migrate_from) {
-        crm_element_value_int(migrate_from, XML_LRM_ATTR_RC, &from_rc);
-        crm_element_value_int(migrate_from, XML_LRM_ATTR_OPSTATUS, &from_status);
-        pe_rsc_trace(rsc, "%s op on %s exited with status=%d, rc=%d",
-                     ID(migrate_from), target, from_status, from_rc);
-    }
-
-    if (migrate_from && from_rc == PCMK_OCF_OK
-        && (from_status == PCMK_EXEC_DONE)) {
-        add_dangling_migration(rsc, node);
-
-    } else if (migrate_from && (from_status != PCMK_EXEC_PENDING)) { // Failed
+    if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
         /* If the resource has newer state on the target, this migrate_to no
          * longer matters for the target.
          */
--
2.31.1

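A standalone sketch (with invented names and status values) of the
initialization change above: defaulting the locals to the values that mean
"still pending" lets the later logic treat a missing migrate_from record and a
pending one identically, instead of testing "record != NULL" everywhere.

    #include <stdio.h>

    enum exec_status { EXEC_PENDING = 0, EXEC_DONE = 1 };

    struct op_record { int rc; enum exec_status status; };

    static void
    classify(const struct op_record *migrate_from)   /* may be NULL */
    {
        int from_rc = 0;                             /* 0 == success here */
        enum exec_status from_status = EXEC_PENDING; /* absent == pending */

        if (migrate_from != NULL) {
            from_rc = migrate_from->rc;
            from_status = migrate_from->status;
        }

        /* One state machine covers both "no record" and "pending record" */
        if ((from_rc == 0) && (from_status == EXEC_DONE)) {
            puts("dangling: migrate_from succeeded, no stop recorded");
        } else if (from_status != EXEC_PENDING) {
            puts("failed migrate_from");
        } else {
            puts("partial: migrate_from pending or never recorded");
        }
    }

    int main(void) {
        struct op_record done = { 0, EXEC_DONE };
        classify(&done);   /* prints the dangling case */
        classify(NULL);    /* prints the partial case */
        return 0;
    }
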
From d98a2687d68747b0598554939dea05c420456a12 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 31 Jan 2023 17:05:50 -0600
Subject: [PATCH 06/14] Refactor: scheduler: avoid duplication of
 active-on-target check

---
 lib/pengine/unpack.c | 24 ++++++------------------
 1 file changed, 6 insertions(+), 18 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index b858b59..8cfc0ef 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2914,6 +2914,7 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
     const char *target = NULL;
     bool source_newer_op = false;
     bool target_newer_state = false;
+    bool active_on_target = false;
 
     // Get source and target node names from XML
     if (get_migration_node_names(xml_op, node, NULL, &source,
@@ -2969,23 +2970,14 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
 
     target_node = pe_find_node(data_set->nodes, target);
     source_node = pe_find_node(data_set->nodes, source);
+    active_on_target = !target_newer_state && (target_node != NULL)
+                       && target_node->details->online;
 
     if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
-        /* If the resource has newer state on the target, this migrate_to no
-         * longer matters for the target.
-         */
-        if (!target_newer_state
-            && target_node && target_node->details->online) {
-            pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
-                         target_node->details->online);
+        if (active_on_target) {
             native_add_running(rsc, target_node, data_set, TRUE);
-
         } else {
-            /* With the earlier bail logic, migrate_from != NULL here implies
-             * source_newer_op is false, meaning this migrate_to still matters
-             * for the source.
-             * Consider it failed here - forces a restart, prevents migration
-             */
+            // Mark resource as failed, require recovery, and prevent migration
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
         }
@@ -2994,11 +2986,7 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
         /* If the resource has newer state on the target, this migrate_to no
          * longer matters for the target.
          */
-        if (!target_newer_state
-            && target_node && target_node->details->online) {
-            pe_rsc_trace(rsc, "Marking active on %s %p %d", target, target_node,
-                         target_node->details->online);
-
+        if (active_on_target) {
             native_add_running(rsc, target_node, data_set, FALSE);
             if (source_node && source_node->details->online) {
                 /* This is a partial migration: the migrate_to completed
--
2.31.1

From ae145309e3fdb26608e99f6d1fe1a7859d98efd0 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 31 Jan 2023 17:07:58 -0600
Subject: [PATCH 07/14] Refactor: scheduler: improve unpacking of successful
 migrate_to

Improve log messages, comments, and formatting, and avoid doing things until
needed, to improve efficiency of early returns.
---
 lib/pengine/unpack.c | 109 +++++++++++++++++++------------------------
 1 file changed, 48 insertions(+), 61 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 8cfc0ef..224b7b5 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2867,48 +2867,40 @@ static void
 unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                           pe_working_set_t *data_set)
 {
-    /* A successful migration sequence is:
-     *     migrate_to on source node
-     *     migrate_from on target node
-     *     stop on source node
+    /* A complete migration sequence is:
+     * 1. migrate_to on source node (which succeeded if we get to this function)
+     * 2. migrate_from on target node
+     * 3. stop on source node
      *
-     * But there could be scenarios like (It's easier to produce with cluster
-     * property batch-limit=1):
-     *
-     * - rscA is live-migrating from node1 to node2.
-     *
-     * - Before migrate_to on node1 returns, put node2 into standby.
-     *
-     * - Transition aborts upon return of successful migrate_to on node1. New
-     *   transition is going to stop the rscA on both nodes and start it on
-     *   node1.
+     * If no migrate_from has happened, the migration is considered to be
+     * "partial". If the migrate_from succeeded but no stop has happened, the
+     * migration is considered to be "dangling".
      *
-     * - While it is stopping on node1, run something that is going to make
-     *   the transition abort again like:
-     *   crm_resource --resource rscA --ban --node node2
+     * If a successful migrate_to and stop have happened on the source node, we
+     * still need to check for a partial migration, due to scenarios (easier to
+     * produce with batch-limit=1) like:
      *
-     * - Transition aborts upon return of stop on node1.
+     * - A resource is migrating from node1 to node2, and a migrate_to is
+     *   initiated for it on node1.
      *
-     * Now although there's a stop on node1, it's still a partial migration and
-     * rscA is still potentially active on node2.
+     * - node2 goes into standby mode while the migrate_to is pending, which
+     *   aborts the transition.
      *
-     * So even if a migrate_to is followed by a stop, we still need to check
-     * whether there's a corresponding migrate_from or any newer operation on
-     * the target.
+     * - Upon completion of the migrate_to, a new transition schedules a stop
+     *   on both nodes and a start on node1.
      *
-     * If no migrate_from has happened, the migration is considered to be
-     * "partial". If the migrate_from failed, make sure the resource gets
-     * stopped on both source and target (if up).
+     * - If the new transition is aborted for any reason while the resource is
+     *   stopping on node1, the transition after that stop completes will see
+     *   the migrate_from and stop on the source, but it's still a partial
+     *   migration, and the resource must be stopped on node2 because it is
+     *   potentially active there due to the migrate_to.
      *
-     * If the migrate_to and migrate_from both succeeded (which also implies the
-     * resource is no longer running on the source), but there is no stop, the
-     * migration is considered to be "dangling". Schedule a stop on the source
-     * in this case.
+     * We also need to take into account that either node's history may be
+     * cleared at any point in the migration process.
      */
     int from_rc = PCMK_OCF_OK;
     int from_status = PCMK_EXEC_PENDING;
     pe_node_t *target_node = NULL;
-    pe_node_t *source_node = NULL;
     xmlNode *migrate_from = NULL;
     const char *source = NULL;
     const char *target = NULL;
@@ -2922,13 +2914,11 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
         return;
     }
 
-    /* If there's any newer non-monitor operation on the source, this migrate_to
-     * potentially no longer matters for the source.
-     */
+    // Check for newer state on the source
     source_newer_op = non_monitor_after(rsc->id, source, xml_op, true,
                                         data_set);
 
-    // Check whether there was a migrate_from action on the target
+    // Check for a migrate_from action from this source on the target
     migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
                                source, -1, data_set);
     if (migrate_from != NULL) {
@@ -2944,12 +2934,11 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
                               &from_status);
     }
 
-    /* If the resource has newer state on the target after the migration
-     * events, this migrate_to no longer matters for the target.
+    /* If the resource has newer state on both the source and target after the
+     * migration events, this migrate_to is irrelevant to the resource's state.
      */
     target_newer_state = newer_state_after_migrate(rsc->id, target, xml_op,
                                                    migrate_from, data_set);
-
     if (source_newer_op && target_newer_state) {
         return;
     }
@@ -2969,7 +2958,6 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
     rsc->role = RSC_ROLE_STARTED;
 
     target_node = pe_find_node(data_set->nodes, target);
-    source_node = pe_find_node(data_set->nodes, source);
     active_on_target = !target_newer_state && (target_node != NULL)
                        && target_node->details->online;
 
@@ -2981,31 +2969,30 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
             pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
         }
+        return;
+    }
 
-    } else { // Pending, or complete but erased
-        /* If the resource has newer state on the target, this migrate_to no
-         * longer matters for the target.
-         */
-        if (active_on_target) {
-            native_add_running(rsc, target_node, data_set, FALSE);
-            if (source_node && source_node->details->online) {
-                /* This is a partial migration: the migrate_to completed
-                 * successfully on the source, but the migrate_from has not
-                 * completed. Remember the source and target; if the newly
-                 * chosen target remains the same when we schedule actions
-                 * later, we may continue with the migration.
-                 */
-                rsc->partial_migration_target = target_node;
-                rsc->partial_migration_source = source_node;
-            }
-        } else if (!source_newer_op) {
-            /* This migrate_to matters for the source only if it's the last
-             * non-monitor operation here.
-             * Consider it failed here - forces a restart, prevents migration
+    // The migrate_from is pending, complete but erased, or to be scheduled
+
+    if (active_on_target) {
+        pe_node_t *source_node = pe_find_node(data_set->nodes, source);
+
+        native_add_running(rsc, target_node, data_set, FALSE);
+        if ((source_node != NULL) && source_node->details->online) {
+            /* This is a partial migration: the migrate_to completed
+             * successfully on the source, but the migrate_from has not
+             * completed. Remember the source and target; if the newly
+             * chosen target remains the same when we schedule actions
+             * later, we may continue with the migration.
             */
-            pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
-            pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
+            rsc->partial_migration_target = target_node;
+            rsc->partial_migration_source = source_node;
         }
+
+    } else if (!source_newer_op) {
+        // Mark resource as failed, require recovery, and prevent migration
+        pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+        pe__clear_resource_flags(rsc, pe_rsc_allow_migrate);
     }
 }
 
--
2.31.1

From 7d63ed8d52f64d2523367cff36bf77bd85296bd9 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 31 Jan 2023 17:14:57 -0600
Subject: [PATCH 08/14] Refactor: scheduler: drop redundant argument from
 unpack_migrate_to_success()

---
 lib/pengine/unpack.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 224b7b5..6222115 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2864,8 +2864,7 @@ add_dangling_migration(pe_resource_t *rsc, const pe_node_t *node)
 }
 
 static void
-unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
-                          pe_working_set_t *data_set)
+unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op)
 {
     /* A complete migration sequence is:
      * 1. migrate_to on source node (which succeeded if we get to this function)
@@ -2916,11 +2915,11 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op)
 
     // Check for newer state on the source
     source_newer_op = non_monitor_after(rsc->id, source, xml_op, true,
-                                        data_set);
+                                        rsc->cluster);
 
     // Check for a migrate_from action from this source on the target
     migrate_from = find_lrm_op(rsc->id, CRMD_ACTION_MIGRATED, target,
-                               source, -1, data_set);
+                               source, -1, rsc->cluster);
     if (migrate_from != NULL) {
         if (source_newer_op) {
             /* There's a newer non-monitor operation on the source and a
@@ -2938,7 +2937,7 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op)
      * migration events, this migrate_to is irrelevant to the resource's state.
      */
     target_newer_state = newer_state_after_migrate(rsc->id, target, xml_op,
-                                                   migrate_from, data_set);
+                                                   migrate_from, rsc->cluster);
     if (source_newer_op && target_newer_state) {
         return;
     }
@@ -2957,13 +2956,13 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op)
      */
     rsc->role = RSC_ROLE_STARTED;
 
-    target_node = pe_find_node(data_set->nodes, target);
+    target_node = pe_find_node(rsc->cluster->nodes, target);
     active_on_target = !target_newer_state && (target_node != NULL)
                        && target_node->details->online;
 
     if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
         if (active_on_target) {
-            native_add_running(rsc, target_node, data_set, TRUE);
+            native_add_running(rsc, target_node, rsc->cluster, TRUE);
         } else {
             // Mark resource as failed, require recovery, and prevent migration
             pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
@@ -2975,9 +2974,9 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op)
     // The migrate_from is pending, complete but erased, or to be scheduled
 
     if (active_on_target) {
-        pe_node_t *source_node = pe_find_node(data_set->nodes, source);
+        pe_node_t *source_node = pe_find_node(rsc->cluster->nodes, source);
 
-        native_add_running(rsc, target_node, data_set, FALSE);
+        native_add_running(rsc, target_node, rsc->cluster, FALSE);
         if ((source_node != NULL) && source_node->details->online) {
             /* This is a partial migration: the migrate_to completed
              * successfully on the source, but the migrate_from has not
@@ -3946,7 +3945,7 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c
         clear_past_failure = TRUE;
 
     } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
-        unpack_migrate_to_success(rsc, node, xml_op, data_set);
+        unpack_migrate_to_success(rsc, node, xml_op);
 
     } else if (rsc->role < RSC_ROLE_STARTED) {
         pe_rsc_trace(rsc, "%s active on %s", rsc->id, pe__node_name(node));
--
2.31.1

From 3be487f87bf5e26277379148922525fd98d29681 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 2 Feb 2023 09:13:30 -0600
Subject: [PATCH 09/14] Doc: scheduler: clarify comments about unpacking
 migration history

per review
---
 lib/pengine/unpack.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 6222115..ec2cf26 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2791,9 +2791,9 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
  * \internal
  * \brief Parse migration source and target node names from history entry
  *
- * \param[in] entry        Resource history entry for a migration action
- * \param[in] source_node  If not NULL, source must match this node
- * \param[in] target_node  If not NULL, target must match this node
+ * \param[in]  entry        Resource history entry for a migration action
+ * \param[in]  source_node  If not NULL, source must match this node
+ * \param[in]  target_node  If not NULL, target must match this node
  * \param[out] source_name  Where to store migration source node name
  * \param[out] target_name  Where to store migration target node name
 *
@@ -2825,7 +2825,7 @@ get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
                          pcmk__str_casei|pcmk__str_null_matches)) {
         crm_err("Ignoring resource history entry %s because "
                 XML_LRM_ATTR_MIGRATE_SOURCE "='%s' does not match %s",
-                id, pcmk__s(*source_name, ""), pe__node_name(source_node));
+                id, *source_name, pe__node_name(source_node));
         return pcmk_rc_unpack_error;
     }
 
@@ -2834,7 +2834,7 @@ get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
                          pcmk__str_casei|pcmk__str_null_matches)) {
         crm_err("Ignoring resource history entry %s because "
                 XML_LRM_ATTR_MIGRATE_TARGET "='%s' does not match %s",
-                id, pcmk__s(*target_name, ""), pe__node_name(target_node));
+                id, *target_name, pe__node_name(target_node));
         return pcmk_rc_unpack_error;
     }
 
@@ -2890,7 +2890,7 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op)
      *
      * - If the new transition is aborted for any reason while the resource is
      *   stopping on node1, the transition after that stop completes will see
-     *   the migrate_from and stop on the source, but it's still a partial
+     *   the migrate_to and stop on the source, but it's still a partial
      *   migration, and the resource must be stopped on node2 because it is
      *   potentially active there due to the migrate_to.
      *
@@ -3425,9 +3425,9 @@ check_recoverable(pe_resource_t *rsc, pe_node_t *node, const char *task,
  * \brief Update an integer value and why
  *
  * \param[in,out] i       Pointer to integer to update
- * \param[in,out] why     Where to store reason for update
+ * \param[out]    why     Where to store reason for update
  * \param[in]     value   New value
- * \param[in,out] reason  Description of why value was changed
+ * \param[in]     reason  Description of why value was changed
  */
 static inline void
 remap_because(int *i, const char **why, int value, const char *reason)
@@ -3456,7 +3456,7 @@ remap_because(int *i, const char **why, int value, const char *reason)
  * \param[in]     data_set  Current cluster working set
  * \param[in,out] on_fail   What should be done about the result
  * \param[in]     target_rc Expected return code of operation
- * \param[in,out] rc        Actual return code of operation
+ * \param[in,out] rc        Actual return code of operation (treated as OCF)
 * \param[in,out] status    Operation execution status
 *
 * \note If the result is remapped and the node is not shutting down or failed,
@@ -3548,7 +3548,7 @@ remap_operation(xmlNode *xml_op, pe_resource_t *rsc, pe_node_t *node,
     switch (*rc) {
         case PCMK_OCF_OK:
             if (is_probe && (target_rc == PCMK_OCF_NOT_RUNNING)) {
-                remap_because(status, &why,PCMK_EXEC_DONE, "probe");
+                remap_because(status, &why, PCMK_EXEC_DONE, "probe");
                 pe_rsc_info(rsc, "Probe found %s active on %s at %s",
|
|
 |
fcdfbd |
rsc->id, pe__node_name(node),
|
|
 |
fcdfbd |
last_change_str(xml_op));
|
|
 |
fcdfbd |
--
|
|
 |
fcdfbd |
2.31.1

From 3ef6c84a7b0dd434731e72d91f2724bdb52e292e Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 2 Feb 2023 09:42:01 -0600
Subject: [PATCH 10/14] Refactor: scheduler: improve xpath efficiency when
unpacking

Using "//" means that every child must be searched recursively. If we know the
exact path, we should explicitly specify it.
---
lib/pengine/unpack.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index ec2cf26..8aead58 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2571,6 +2571,13 @@ set_node_score(gpointer key, gpointer value, gpointer user_data)
node->weight = *score;
}

+#define XPATH_NODE_STATE "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS \
+ "/" XML_CIB_TAG_STATE
+#define SUB_XPATH_LRM_RESOURCE "/" XML_CIB_TAG_LRM \
+ "/" XML_LRM_TAG_RESOURCES \
+ "/" XML_LRM_TAG_RESOURCE
+#define SUB_XPATH_LRM_RSC_OP "/" XML_LRM_TAG_RSC_OP
+
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
int target_rc, pe_working_set_t *data_set)
@@ -2583,10 +2590,9 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *

xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
- "//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='", node, "']"
- "//" XML_LRM_TAG_RESOURCE
- "[@" XML_ATTR_ID "='", resource, "']"
- "/" XML_LRM_TAG_RSC_OP "[@" XML_LRM_ATTR_TASK "='", op, "'",
+ XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node, "']"
+ SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", resource, "']"
+ SUB_XPATH_LRM_RSC_OP "[@" XML_LRM_ATTR_TASK "='", op, "'",
NULL);

/* Need to check against transition_magic too? */
@@ -2631,10 +2637,8 @@ find_lrm_resource(const char *rsc_id, const char *node_name,

xpath = g_string_sized_new(256);
pcmk__g_strcat(xpath,
- "//" XML_CIB_TAG_STATE
- "[@" XML_ATTR_UNAME "='", node_name, "']"
- "//" XML_LRM_TAG_RESOURCE
- "[@" XML_ATTR_ID "='", rsc_id, "']",
+ XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node_name, "']"
+ SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc_id, "']",
NULL);

xml = get_xpath_object((const char *) xpath->str, data_set->input,
--
2.31.1
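
The commit message above is the whole rationale: an XPath beginning with "//" must visit every element in the document, while a path anchored at the root lets the evaluator descend only the named children. The following standalone sketch makes that concrete with libxml2 (the XML library Pacemaker uses); it is illustration only, with a toy CIB snippet and file name that are simplified assumptions, and both queries find the same node.

/*
 * Minimal illustration only (not Pacemaker code): anchored XPath vs "//".
 * Build with: gcc xpath-demo.c $(pkg-config --cflags --libs libxml-2.0)
 */
#include <stdio.h>
#include <string.h>
#include <libxml/parser.h>
#include <libxml/xpath.h>

static void count_matches(xmlXPathContextPtr ctx, const char *expr)
{
    xmlXPathObjectPtr obj = xmlXPathEvalExpression((const xmlChar *) expr, ctx);

    if (obj != NULL) {
        int n = (obj->nodesetval == NULL)? 0 : obj->nodesetval->nodeNr;

        printf("%d match(es): %s\n", n, expr);
        xmlXPathFreeObject(obj);
    }
}

int main(void)
{
    const char *cib =
        "<cib><status><node_state uname='rhel8-2'>"
        "<lrm><lrm_resources><lrm_resource id='migrator'>"
        "<lrm_rsc_op id='migrator_last_0' operation='migrate_to'/>"
        "</lrm_resource></lrm_resources></lrm>"
        "</node_state></status></cib>";
    xmlDocPtr doc = xmlReadMemory(cib, (int) strlen(cib), "cib.xml", NULL, 0);
    xmlXPathContextPtr ctx = xmlXPathNewContext(doc);

    /* Anchored: the evaluator descends one known path from the root */
    count_matches(ctx, "/cib/status/node_state[@uname='rhel8-2']"
                       "/lrm/lrm_resources/lrm_resource[@id='migrator']");
    /* Unanchored: "//" forces a recursive scan of every element */
    count_matches(ctx, "//lrm_resource[@id='migrator']");

    xmlXPathFreeContext(ctx);
    xmlFreeDoc(doc);
    return 0;
}

On a large CIB with many node_state and resource history entries, the anchored form avoids scanning every unrelated subtree for each lookup.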

From 1869f99bc8eeedb976f96f0f1cc3d4dd86735504 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Thu, 2 Feb 2023 10:25:53 -0600
Subject: [PATCH 11/14] Low: scheduler: unknown_on_node() should ignore pending
actions

Previously, unknown_on_node() looked for any lrm_rsc_op at all to decide
whether a resource is known on a node. However if the only action is pending,
the resource is not yet known.

Also drop a redundant argument and add a doxygen block. (The rsc argument is
not const due to a getDocPtr() call in the chain, as well as libxml2 calls that
are likely const in practice but aren't marked as such.)
---
lib/pengine/unpack.c | 37 +++++++++++++++++++++++++------------
1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 8aead58..14dc202 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2648,19 +2648,32 @@ find_lrm_resource(const char *rsc_id, const char *node_name,
return xml;
}

+/*!
+ * \internal
+ * \brief Check whether a resource has no completed action history on a node
+ *
+ * \param[in,out] rsc Resource to check
+ * \param[in] node_name Node to check
+ *
+ * \return true if \p rsc_id is unknown on \p node_name, otherwise false
+ */
static bool
-unknown_on_node(const char *rsc_id, const char *node_name,
- pe_working_set_t *data_set)
+unknown_on_node(pe_resource_t *rsc, const char *node_name)
{
- xmlNode *lrm_resource = NULL;
-
- lrm_resource = find_lrm_resource(rsc_id, node_name, data_set);
+ bool result = false;
+ xmlXPathObjectPtr search;
+ GString *xpath = g_string_sized_new(256);

- /* If the resource has no lrm_rsc_op history on the node, that means its
- * state is unknown there.
- */
- return (lrm_resource == NULL
- || first_named_child(lrm_resource, XML_LRM_TAG_RSC_OP) == NULL);
+ pcmk__g_strcat(xpath,
+ XPATH_NODE_STATE "[@" XML_ATTR_UNAME "='", node_name, "']"
+ SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc->id, "']"
+ SUB_XPATH_LRM_RSC_OP "[@" XML_LRM_ATTR_RC "!='193']",
+ NULL);
+ search = xpath_search(rsc->cluster->input, (const char *) xpath->str);
+ result = (numXpathResults(search) == 0);
+ freeXpathObject(search);
+ g_string_free(xpath, TRUE);
+ return result;
}

/*!
@@ -3027,7 +3040,7 @@ unpack_migrate_to_failure(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
- !unknown_on_node(rsc->id, target, data_set)
+ !unknown_on_node(rsc, target)
/* If the resource has newer state on the target after the migration
* events, this migrate_to no longer matters for the target.
*/
@@ -3082,7 +3095,7 @@ unpack_migrate_from_failure(pe_resource_t *rsc, pe_node_t *node,
* Don't just consider it running there. We will get back here anyway in
* case the probe detects it's running there.
*/
- !unknown_on_node(rsc->id, source, data_set)
+ !unknown_on_node(rsc, source)
/* If the resource has newer state on the source after the migration
* events, this migrate_from no longer matters for the source.
*/
--
2.31.1
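
The key detail in the hunk above is the new predicate on the operation's rc attribute: by filtering out entries whose rc is 193, a node whose only history for the resource is a pending operation still counts as having no known state there, so the resource gets probed. A standalone libxml2 sketch of the same idea follows (illustration only; the status XML is a simplified assumption, and 193 is the result code recorded for operations that have not completed yet).

/*
 * Minimal illustration only (not Pacemaker code) of the check above: a
 * resource whose only recorded lrm_rsc_op is still pending should count as
 * unknown on that node.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <libxml/parser.h>
#include <libxml/xpath.h>

static bool unknown_on_node(xmlDocPtr doc, const char *rsc_id,
                            const char *node_name)
{
    char expr[512];
    xmlXPathContextPtr ctx = xmlXPathNewContext(doc);
    xmlXPathObjectPtr obj;
    bool unknown;

    /* Count only completed history: filter out pending ops (rc-code 193) */
    snprintf(expr, sizeof(expr),
             "/cib/status/node_state[@uname='%s']"
             "/lrm/lrm_resources/lrm_resource[@id='%s']"
             "/lrm_rsc_op[@rc-code!='193']", node_name, rsc_id);
    obj = xmlXPathEvalExpression((const xmlChar *) expr, ctx);
    unknown = (obj == NULL) || (obj->nodesetval == NULL)
              || (obj->nodesetval->nodeNr == 0);
    xmlXPathFreeObject(obj);
    xmlXPathFreeContext(ctx);
    return unknown;
}

int main(void)
{
    /* The only history entry for migrator on rhel8-2 is a pending start */
    const char *status =
        "<cib><status><node_state uname='rhel8-2'>"
        "<lrm><lrm_resources><lrm_resource id='migrator'>"
        "<lrm_rsc_op id='migrator_last_0' operation='start' rc-code='193'/>"
        "</lrm_resource></lrm_resources></lrm>"
        "</node_state></status></cib>";
    xmlDocPtr doc = xmlReadMemory(status, (int) strlen(status), "status.xml",
                                  NULL, 0);

    /* Prints "yes": only a pending op exists, so the state is unknown */
    printf("unknown? %s\n",
           unknown_on_node(doc, "migrator", "rhel8-2")? "yes" : "no");
    xmlFreeDoc(doc);
    return 0;
}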

From 22fbab8e0d449d2accb231dfcec94294ded27f4e Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 31 Jan 2023 12:11:19 -0600
Subject: [PATCH 12/14] Test: scheduler: add regression test for migration
intermediary

As of this commit, the cluster wrongly restarts the migrated resource
---
cts/cts-scheduler.in | 3 +
.../dot/migration-intermediary-cleaned.dot | 46 ++
.../exp/migration-intermediary-cleaned.exp | 316 +++++++++++
.../migration-intermediary-cleaned.scores | 201 +++++++
.../migration-intermediary-cleaned.summary | 94 ++++
.../xml/migration-intermediary-cleaned.xml | 513 ++++++++++++++++++
6 files changed, 1173 insertions(+)
create mode 100644 cts/scheduler/dot/migration-intermediary-cleaned.dot
create mode 100644 cts/scheduler/exp/migration-intermediary-cleaned.exp
create mode 100644 cts/scheduler/scores/migration-intermediary-cleaned.scores
create mode 100644 cts/scheduler/summary/migration-intermediary-cleaned.summary
create mode 100644 cts/scheduler/xml/migration-intermediary-cleaned.xml

diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index feb5dc8..9899c36 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -387,6 +387,9 @@ TESTS = [
[ "probe-target-of-failed-migrate_to-1", "Failed migrate_to, target rejoins" ],
[ "probe-target-of-failed-migrate_to-2", "Failed migrate_to, target rejoined and probed" ],
[ "partial-live-migration-multiple-active", "Prevent running on multiple nodes due to partial live migration" ],
+ [ "migration-intermediary-cleaned",
+ "Probe live-migration intermediary with no history"
+ ],
[ "bug-lf-2422", "Dependency on partially active group - stop ocfs:*" ],
],
[
diff --git a/cts/scheduler/dot/migration-intermediary-cleaned.dot b/cts/scheduler/dot/migration-intermediary-cleaned.dot
new file mode 100644
index 0000000..09568d0
--- /dev/null
+++ b/cts/scheduler/dot/migration-intermediary-cleaned.dot
@@ -0,0 +1,46 @@
+ digraph "g" {
+"Connectivity_running_0" [ style=bold color="green" fontcolor="orange"]
+"Connectivity_start_0" -> "Connectivity_running_0" [ style = bold]
+"Connectivity_start_0" -> "ping-1_start_0 rhel8-2" [ style = bold]
+"Connectivity_start_0" [ style=bold color="green" fontcolor="orange"]
+"FencingFail_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"FencingPass_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"Fencing_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"lsb-dummy_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"migrator_monitor_0 rhel8-2" -> "migrator_start_0 rhel8-5" [ style = bold]
+"migrator_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"migrator_monitor_10000 rhel8-5" [ style=bold color="green" fontcolor="black"]
+"migrator_start_0 rhel8-5" -> "migrator_monitor_10000 rhel8-5" [ style = bold]
+"migrator_start_0 rhel8-5" [ style=bold color="green" fontcolor="black"]
+"migrator_stop_0 rhel8-2" -> "migrator_start_0 rhel8-5" [ style = bold]
+"migrator_stop_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"migrator_stop_0 rhel8-5" -> "migrator_start_0 rhel8-5" [ style = bold]
+"migrator_stop_0 rhel8-5" [ style=bold color="green" fontcolor="black"]
+"petulant_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"ping-1_monitor_0 rhel8-2" -> "Connectivity_start_0" [ style = bold]
+"ping-1_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"ping-1_monitor_60000 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"ping-1_start_0 rhel8-2" -> "Connectivity_running_0" [ style = bold]
+"ping-1_start_0 rhel8-2" -> "ping-1_monitor_60000 rhel8-2" [ style = bold]
+"ping-1_start_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"r192.168.122.207_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"r192.168.122.208_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-1_monitor_0 rhel8-2" -> "rsc_rhel8-1_start_0 rhel8-2" [ style = bold]
+"rsc_rhel8-1_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-1_monitor_5000 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-1_start_0 rhel8-2" -> "rsc_rhel8-1_monitor_5000 rhel8-2" [ style = bold]
+"rsc_rhel8-1_start_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-1_stop_0 rhel8-3" -> "rsc_rhel8-1_start_0 rhel8-2" [ style = bold]
+"rsc_rhel8-1_stop_0 rhel8-3" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-2_monitor_0 rhel8-2" -> "rsc_rhel8-2_start_0 rhel8-2" [ style = bold]
+"rsc_rhel8-2_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-2_monitor_5000 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-2_start_0 rhel8-2" -> "rsc_rhel8-2_monitor_5000 rhel8-2" [ style = bold]
+"rsc_rhel8-2_start_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-2_stop_0 rhel8-4" -> "rsc_rhel8-2_start_0 rhel8-2" [ style = bold]
+"rsc_rhel8-2_stop_0 rhel8-4" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-3_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-4_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"rsc_rhel8-5_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+"stateful-1_monitor_0 rhel8-2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/exp/migration-intermediary-cleaned.exp b/cts/scheduler/exp/migration-intermediary-cleaned.exp
new file mode 100644
index 0000000..28fa776
--- /dev/null
+++ b/cts/scheduler/exp/migration-intermediary-cleaned.exp
@@ -0,0 +1,316 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="1" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="18" operation="monitor" operation_key="Fencing_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="120000" key_file="/etc/pacemaker/fence_xvm.key" multicast_address="239.255.100.100" pcmk_host_list="rhel8-1 remote-rhel8-1 rhel8-2 remote-rhel8-2 rhel8-3 remote-rhel8-3 rhel8-4 remote-rhel8-4 rhel8-5 remote-rhel8-5" pcmk_host_map="remote-rhel8-1:rhel8-1;remote-rhel8-2:rhel8-2;remote-rhel8-3:rhel8-3;remote-rhel8-4:rhel8-4;remote-rhel8-5:rhel8-5;"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="19" operation="monitor" operation_key="FencingPass_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="FencingPass" class="stonith" type="fence_dummy"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" mode="pass" pcmk_host_list="rhel8-2 remote-rhel8-2 rhel8-3 remote-rhel8-3 rhel8-5 remote-rhel8-5" random_sleep_range="30"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="20" operation="monitor" operation_key="FencingFail_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="FencingFail" class="stonith" type="fence_dummy"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" mode="fail" pcmk_host_list="rhel8-4 remote-rhel8-4" random_sleep_range="30"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="41" operation="monitor" operation_key="rsc_rhel8-1_monitor_5000" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-1" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="5000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.202"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="start" operation_key="rsc_rhel8-1_start_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="40" operation="start" operation_key="rsc_rhel8-1_start_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-1" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.202"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="monitor" operation_key="rsc_rhel8-1_monitor_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="39" operation="stop" operation_key="rsc_rhel8-1_stop_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="39" operation="stop" operation_key="rsc_rhel8-1_stop_0" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="rsc_rhel8-1" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.202"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="21" operation="monitor" operation_key="rsc_rhel8-1_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-1" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.202"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="44" operation="monitor" operation_key="rsc_rhel8-2_monitor_5000" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="5000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.203"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="43" operation="start" operation_key="rsc_rhel8-2_start_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="43" operation="start" operation_key="rsc_rhel8-2_start_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.203"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="22" operation="monitor" operation_key="rsc_rhel8-2_monitor_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="42" operation="stop" operation_key="rsc_rhel8-2_stop_0" on_node="rhel8-4" on_node_uuid="4"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="42" operation="stop" operation_key="rsc_rhel8-2_stop_0" on_node="rhel8-4" on_node_uuid="4">
+ <primitive id="rsc_rhel8-2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.203"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="22" operation="monitor" operation_key="rsc_rhel8-2_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-2" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.203"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="23" operation="monitor" operation_key="rsc_rhel8-3_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-3" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.204"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="24" operation="monitor" operation_key="rsc_rhel8-4_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-4" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.205"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <rsc_op id="25" operation="monitor" operation_key="rsc_rhel8-5_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="rsc_rhel8-5" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.206"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="53" operation="start" operation_key="migrator_start_0" on_node="rhel8-5" on_node_uuid="5">
+ <primitive id="migrator" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="rhel8-5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="90000" passwd="whatever"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="26" operation="monitor" operation_key="migrator_monitor_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="51" operation="stop" operation_key="migrator_stop_0" on_node="rhel8-5" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="52" operation="stop" operation_key="migrator_stop_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="52" operation="stop" operation_key="migrator_stop_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="migrator" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" passwd="whatever"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <rsc_op id="51" operation="stop" operation_key="migrator_stop_0" on_node="rhel8-5" on_node_uuid="5">
+ <primitive id="migrator" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="rhel8-5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="90000" passwd="whatever"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <rsc_op id="26" operation="monitor" operation_key="migrator_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="migrator" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" passwd="whatever"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="18">
+ <action_set>
+ <rsc_op id="17" operation="monitor" operation_key="migrator_monitor_10000" on_node="rhel8-5" on_node_uuid="5">
+ <primitive id="migrator" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="90000" passwd="whatever"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="53" operation="start" operation_key="migrator_start_0" on_node="rhel8-5" on_node_uuid="5"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="19">
+ <action_set>
+ <rsc_op id="61" operation="monitor" operation_key="ping-1_monitor_60000" internal_operation_key="ping-1:3_monitor_60000" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="ping-1" long-id="ping-1:3" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_clone="3" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" debug="true" host_list="192.168.122.80" name="connected"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="60" operation="start" operation_key="ping-1_start_0" internal_operation_key="ping-1:3_start_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="20">
+ <action_set>
+ <rsc_op id="60" operation="start" operation_key="ping-1_start_0" internal_operation_key="ping-1:3_start_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="ping-1" long-id="ping-1:3" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_clone="3" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" debug="true" host_list="192.168.122.80" name="connected"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="62" operation="start" operation_key="Connectivity_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="21">
+ <action_set>
+ <rsc_op id="27" operation="monitor" operation_key="ping-1_monitor_0" internal_operation_key="ping-1:3_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="ping-1" long-id="ping-1:3" class="ocf" provider="pacemaker" type="ping"/>
+ <attributes CRM_meta_clone="3" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" debug="true" host_list="192.168.122.80" name="connected"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="22" priority="1000000">
+ <action_set>
+ <pseudo_event id="63" operation="running" operation_key="Connectivity_running_0">
+ <attributes CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="90000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="60" operation="start" operation_key="ping-1_start_0" internal_operation_key="ping-1:3_start_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="62" operation="start" operation_key="Connectivity_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="23">
+ <action_set>
+ <pseudo_event id="62" operation="start" operation_key="Connectivity_start_0">
+ <attributes CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="90000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="27" operation="monitor" operation_key="ping-1_monitor_0" internal_operation_key="ping-1:3_monitor_0" on_node="rhel8-2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="24">
+ <action_set>
+ <rsc_op id="28" operation="monitor" operation_key="stateful-1_monitor_0" internal_operation_key="stateful-1:0_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="stateful-1" long-id="stateful-1:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="25">
+ <action_set>
+ <rsc_op id="29" operation="monitor" operation_key="r192.168.122.207_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="r192.168.122.207" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.207"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="26">
+ <action_set>
+ <rsc_op id="30" operation="monitor" operation_key="petulant_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="petulant" class="service" type="pacemaker-cts-dummyd@10"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="27">
+ <action_set>
+ <rsc_op id="31" operation="monitor" operation_key="r192.168.122.208_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="r192.168.122.208" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" cidr_netmask="32" ip="192.168.122.208"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="28">
+ <action_set>
+ <rsc_op id="32" operation="monitor" operation_key="lsb-dummy_monitor_0" on_node="rhel8-2" on_node_uuid="2">
+ <primitive id="lsb-dummy" class="lsb" type="LSBDummy"/>
+ <attributes CRM_meta_on_node="rhel8-2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="90000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/scores/migration-intermediary-cleaned.scores b/cts/scheduler/scores/migration-intermediary-cleaned.scores
new file mode 100644
index 0000000..b3b8dff
--- /dev/null
+++ b/cts/scheduler/scores/migration-intermediary-cleaned.scores
@@ -0,0 +1,201 @@
+
+pcmk__clone_allocate: Connectivity allocation score on rhel8-1: 0
+pcmk__clone_allocate: Connectivity allocation score on rhel8-2: 0
+pcmk__clone_allocate: Connectivity allocation score on rhel8-3: 0
+pcmk__clone_allocate: Connectivity allocation score on rhel8-4: 0
+pcmk__clone_allocate: Connectivity allocation score on rhel8-5: 0
+pcmk__clone_allocate: ping-1:0 allocation score on rhel8-1: 0
+pcmk__clone_allocate: ping-1:0 allocation score on rhel8-2: 0
+pcmk__clone_allocate: ping-1:0 allocation score on rhel8-3: 1
+pcmk__clone_allocate: ping-1:0 allocation score on rhel8-4: 0
+pcmk__clone_allocate: ping-1:0 allocation score on rhel8-5: 0
+pcmk__clone_allocate: ping-1:1 allocation score on rhel8-1: 0
+pcmk__clone_allocate: ping-1:1 allocation score on rhel8-2: 0
+pcmk__clone_allocate: ping-1:1 allocation score on rhel8-3: 0
+pcmk__clone_allocate: ping-1:1 allocation score on rhel8-4: 1
+pcmk__clone_allocate: ping-1:1 allocation score on rhel8-5: 0
+pcmk__clone_allocate: ping-1:2 allocation score on rhel8-1: 0
+pcmk__clone_allocate: ping-1:2 allocation score on rhel8-2: 0
+pcmk__clone_allocate: ping-1:2 allocation score on rhel8-3: 0
+pcmk__clone_allocate: ping-1:2 allocation score on rhel8-4: 0
+pcmk__clone_allocate: ping-1:2 allocation score on rhel8-5: 1
+pcmk__clone_allocate: ping-1:3 allocation score on rhel8-1: 0
+pcmk__clone_allocate: ping-1:3 allocation score on rhel8-2: 0
+pcmk__clone_allocate: ping-1:3 allocation score on rhel8-3: 0
+pcmk__clone_allocate: ping-1:3 allocation score on rhel8-4: 0
+pcmk__clone_allocate: ping-1:3 allocation score on rhel8-5: 0
+pcmk__clone_allocate: ping-1:4 allocation score on rhel8-1: 0
+pcmk__clone_allocate: ping-1:4 allocation score on rhel8-2: 0
+pcmk__clone_allocate: ping-1:4 allocation score on rhel8-3: 0
+pcmk__clone_allocate: ping-1:4 allocation score on rhel8-4: 0
+pcmk__clone_allocate: ping-1:4 allocation score on rhel8-5: 0
+pcmk__clone_allocate: promotable-1 allocation score on rhel8-1: -INFINITY
+pcmk__clone_allocate: promotable-1 allocation score on rhel8-2: -INFINITY
+pcmk__clone_allocate: promotable-1 allocation score on rhel8-3: 0
+pcmk__clone_allocate: promotable-1 allocation score on rhel8-4: 0
+pcmk__clone_allocate: promotable-1 allocation score on rhel8-5: 0
+pcmk__clone_allocate: stateful-1:0 allocation score on rhel8-1: -INFINITY
+pcmk__clone_allocate: stateful-1:0 allocation score on rhel8-2: -INFINITY
+pcmk__clone_allocate: stateful-1:0 allocation score on rhel8-3: 11
+pcmk__clone_allocate: stateful-1:0 allocation score on rhel8-4: 0
+pcmk__clone_allocate: stateful-1:0 allocation score on rhel8-5: 0
+pcmk__clone_allocate: stateful-1:1 allocation score on rhel8-1: -INFINITY
+pcmk__clone_allocate: stateful-1:1 allocation score on rhel8-2: -INFINITY
+pcmk__clone_allocate: stateful-1:1 allocation score on rhel8-3: 0
+pcmk__clone_allocate: stateful-1:1 allocation score on rhel8-4: 6
+pcmk__clone_allocate: stateful-1:1 allocation score on rhel8-5: 0
+pcmk__clone_allocate: stateful-1:2 allocation score on rhel8-1: -INFINITY
+pcmk__clone_allocate: stateful-1:2 allocation score on rhel8-2: -INFINITY
+pcmk__clone_allocate: stateful-1:2 allocation score on rhel8-3: 0
+pcmk__clone_allocate: stateful-1:2 allocation score on rhel8-4: 0
+pcmk__clone_allocate: stateful-1:2 allocation score on rhel8-5: 6
+pcmk__clone_allocate: stateful-1:3 allocation score on rhel8-1: -INFINITY
+pcmk__clone_allocate: stateful-1:3 allocation score on rhel8-2: -INFINITY
+pcmk__clone_allocate: stateful-1:3 allocation score on rhel8-3: 0
+pcmk__clone_allocate: stateful-1:3 allocation score on rhel8-4: 0
+pcmk__clone_allocate: stateful-1:3 allocation score on rhel8-5: 0
+pcmk__clone_allocate: stateful-1:4 allocation score on rhel8-1: -INFINITY
+pcmk__clone_allocate: stateful-1:4 allocation score on rhel8-2: -INFINITY
+pcmk__clone_allocate: stateful-1:4 allocation score on rhel8-3: 10
+pcmk__clone_allocate: stateful-1:4 allocation score on rhel8-4: 5
+pcmk__clone_allocate: stateful-1:4 allocation score on rhel8-5: 5
+pcmk__group_assign: group-1 allocation score on rhel8-1: 0
+pcmk__group_assign: group-1 allocation score on rhel8-2: 0
+pcmk__group_assign: group-1 allocation score on rhel8-3: 0
+pcmk__group_assign: group-1 allocation score on rhel8-4: 0
+pcmk__group_assign: group-1 allocation score on rhel8-5: 0
+pcmk__group_assign: petulant allocation score on rhel8-1: 0
+pcmk__group_assign: petulant allocation score on rhel8-2: 0
+pcmk__group_assign: petulant allocation score on rhel8-3: 0
+pcmk__group_assign: petulant allocation score on rhel8-4: 0
+pcmk__group_assign: petulant allocation score on rhel8-5: 0
+pcmk__group_assign: r192.168.122.207 allocation score on rhel8-1: 0
+pcmk__group_assign: r192.168.122.207 allocation score on rhel8-2: 0
+pcmk__group_assign: r192.168.122.207 allocation score on rhel8-3: 0
+pcmk__group_assign: r192.168.122.207 allocation score on rhel8-4: 0
+pcmk__group_assign: r192.168.122.207 allocation score on rhel8-5: 0
+pcmk__group_assign: r192.168.122.208 allocation score on rhel8-1: 0
+pcmk__group_assign: r192.168.122.208 allocation score on rhel8-2: 0
+pcmk__group_assign: r192.168.122.208 allocation score on rhel8-3: 0
+pcmk__group_assign: r192.168.122.208 allocation score on rhel8-4: 0
+pcmk__group_assign: r192.168.122.208 allocation score on rhel8-5: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-1: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-2: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-3: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-4: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-5: 0
+pcmk__primitive_assign: FencingFail allocation score on rhel8-1: 0
+pcmk__primitive_assign: FencingFail allocation score on rhel8-2: 0
+pcmk__primitive_assign: FencingFail allocation score on rhel8-3: 0
+pcmk__primitive_assign: FencingFail allocation score on rhel8-4: 0
+pcmk__primitive_assign: FencingFail allocation score on rhel8-5: 0
+pcmk__primitive_assign: FencingPass allocation score on rhel8-1: 0
+pcmk__primitive_assign: FencingPass allocation score on rhel8-2: 0
+pcmk__primitive_assign: FencingPass allocation score on rhel8-3: 0
+pcmk__primitive_assign: FencingPass allocation score on rhel8-4: 0
+pcmk__primitive_assign: FencingPass allocation score on rhel8-5: 0
+pcmk__primitive_assign: lsb-dummy allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: lsb-dummy allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: lsb-dummy allocation score on rhel8-3: 0
+pcmk__primitive_assign: lsb-dummy allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: lsb-dummy allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: migrator allocation score on rhel8-1: 0
+pcmk__primitive_assign: migrator allocation score on rhel8-2: 0
+pcmk__primitive_assign: migrator allocation score on rhel8-3: 0
+pcmk__primitive_assign: migrator allocation score on rhel8-4: 0
+pcmk__primitive_assign: migrator allocation score on rhel8-5: 0
+pcmk__primitive_assign: petulant allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: petulant allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: petulant allocation score on rhel8-3: 0
+pcmk__primitive_assign: petulant allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: petulant allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: ping-1:0 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: ping-1:0 allocation score on rhel8-2: 0
+pcmk__primitive_assign: ping-1:0 allocation score on rhel8-3: 1
+pcmk__primitive_assign: ping-1:0 allocation score on rhel8-4: 0
+pcmk__primitive_assign: ping-1:0 allocation score on rhel8-5: 0
+pcmk__primitive_assign: ping-1:1 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: ping-1:1 allocation score on rhel8-2: 0
+pcmk__primitive_assign: ping-1:1 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: ping-1:1 allocation score on rhel8-4: 1
+pcmk__primitive_assign: ping-1:1 allocation score on rhel8-5: 0
+pcmk__primitive_assign: ping-1:2 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: ping-1:2 allocation score on rhel8-2: 0
+pcmk__primitive_assign: ping-1:2 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: ping-1:2 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: ping-1:2 allocation score on rhel8-5: 1
+pcmk__primitive_assign: ping-1:3 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: ping-1:3 allocation score on rhel8-2: 0
+pcmk__primitive_assign: ping-1:3 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: ping-1:3 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: ping-1:3 allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: ping-1:4 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: ping-1:4 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: ping-1:4 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: ping-1:4 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: ping-1:4 allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: r192.168.122.207 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: r192.168.122.207 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: r192.168.122.207 allocation score on rhel8-3: 11
+pcmk__primitive_assign: r192.168.122.207 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: r192.168.122.207 allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: r192.168.122.208 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: r192.168.122.208 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: r192.168.122.208 allocation score on rhel8-3: 0
+pcmk__primitive_assign: r192.168.122.208 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: r192.168.122.208 allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: rsc_rhel8-1 allocation score on rhel8-1: 100
+pcmk__primitive_assign: rsc_rhel8-1 allocation score on rhel8-2: 0
+pcmk__primitive_assign: rsc_rhel8-1 allocation score on rhel8-3: 0
+pcmk__primitive_assign: rsc_rhel8-1 allocation score on rhel8-4: 0
+pcmk__primitive_assign: rsc_rhel8-1 allocation score on rhel8-5: 0
+pcmk__primitive_assign: rsc_rhel8-2 allocation score on rhel8-1: 0
+pcmk__primitive_assign: rsc_rhel8-2 allocation score on rhel8-2: 100
+pcmk__primitive_assign: rsc_rhel8-2 allocation score on rhel8-3: 0
+pcmk__primitive_assign: rsc_rhel8-2 allocation score on rhel8-4: 0
+pcmk__primitive_assign: rsc_rhel8-2 allocation score on rhel8-5: 0
+pcmk__primitive_assign: rsc_rhel8-3 allocation score on rhel8-1: 0
+pcmk__primitive_assign: rsc_rhel8-3 allocation score on rhel8-2: 0
+pcmk__primitive_assign: rsc_rhel8-3 allocation score on rhel8-3: 100
+pcmk__primitive_assign: rsc_rhel8-3 allocation score on rhel8-4: 0
+pcmk__primitive_assign: rsc_rhel8-3 allocation score on rhel8-5: 0
+pcmk__primitive_assign: rsc_rhel8-4 allocation score on rhel8-1: 0
+pcmk__primitive_assign: rsc_rhel8-4 allocation score on rhel8-2: 0
+pcmk__primitive_assign: rsc_rhel8-4 allocation score on rhel8-3: 0
+pcmk__primitive_assign: rsc_rhel8-4 allocation score on rhel8-4: 100
+pcmk__primitive_assign: rsc_rhel8-4 allocation score on rhel8-5: 0
+pcmk__primitive_assign: rsc_rhel8-5 allocation score on rhel8-1: 0
+pcmk__primitive_assign: rsc_rhel8-5 allocation score on rhel8-2: 0
+pcmk__primitive_assign: rsc_rhel8-5 allocation score on rhel8-3: 0
+pcmk__primitive_assign: rsc_rhel8-5 allocation score on rhel8-4: 0
+pcmk__primitive_assign: rsc_rhel8-5 allocation score on rhel8-5: 100
+pcmk__primitive_assign: stateful-1:0 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: stateful-1:0 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: stateful-1:0 allocation score on rhel8-3: 11
+pcmk__primitive_assign: stateful-1:0 allocation score on rhel8-4: 0
+pcmk__primitive_assign: stateful-1:0 allocation score on rhel8-5: 0
+pcmk__primitive_assign: stateful-1:1 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: stateful-1:1 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: stateful-1:1 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: stateful-1:1 allocation score on rhel8-4: 6
+pcmk__primitive_assign: stateful-1:1 allocation score on rhel8-5: 0
+pcmk__primitive_assign: stateful-1:2 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: stateful-1:2 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: stateful-1:2 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: stateful-1:2 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: stateful-1:2 allocation score on rhel8-5: 6
+pcmk__primitive_assign: stateful-1:3 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: stateful-1:3 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: stateful-1:3 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: stateful-1:3 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: stateful-1:3 allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: stateful-1:4 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: stateful-1:4 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: stateful-1:4 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: stateful-1:4 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: stateful-1:4 allocation score on rhel8-5: -INFINITY
+stateful-1:0 promotion score on rhel8-3: 10
+stateful-1:1 promotion score on rhel8-4: 5
+stateful-1:2 promotion score on rhel8-5: 5
+stateful-1:3 promotion score on none: 0
+stateful-1:4 promotion score on none: 0
diff --git a/cts/scheduler/summary/migration-intermediary-cleaned.summary b/cts/scheduler/summary/migration-intermediary-cleaned.summary
new file mode 100644
index 0000000..5de1355
--- /dev/null
+++ b/cts/scheduler/summary/migration-intermediary-cleaned.summary
@@ -0,0 +1,94 @@
+Using the original execution date of: 2023-01-19 21:05:59Z
+Current cluster status:
+ * Node List:
+ * Online: [ rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
+ * OFFLINE: [ rhel8-1 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started rhel8-3
+ * FencingPass (stonith:fence_dummy): Started rhel8-4
+ * FencingFail (stonith:fence_dummy): Started rhel8-5
+ * rsc_rhel8-1 (ocf:heartbeat:IPaddr2): Started rhel8-3
+ * rsc_rhel8-2 (ocf:heartbeat:IPaddr2): Started rhel8-4
+ * rsc_rhel8-3 (ocf:heartbeat:IPaddr2): Started rhel8-3
+ * rsc_rhel8-4 (ocf:heartbeat:IPaddr2): Started rhel8-4
+ * rsc_rhel8-5 (ocf:heartbeat:IPaddr2): Started rhel8-5
+ * migrator (ocf:pacemaker:Dummy): Started [ rhel8-5 rhel8-2 ]
+ * Clone Set: Connectivity [ping-1]:
+ * Started: [ rhel8-3 rhel8-4 rhel8-5 ]
+ * Stopped: [ rhel8-1 rhel8-2 ]
+ * Clone Set: promotable-1 [stateful-1] (promotable):
+ * Promoted: [ rhel8-3 ]
+ * Unpromoted: [ rhel8-4 rhel8-5 ]
+ * Stopped: [ rhel8-1 rhel8-2 ]
+ * Resource Group: group-1:
+ * r192.168.122.207 (ocf:heartbeat:IPaddr2): Started rhel8-3
+ * petulant (service:pacemaker-cts-dummyd@10): Started rhel8-3
+ * r192.168.122.208 (ocf:heartbeat:IPaddr2): Started rhel8-3
+ * lsb-dummy (lsb:LSBDummy): Started rhel8-3
+
+Transition Summary:
+ * Move rsc_rhel8-1 ( rhel8-3 -> rhel8-2 )
+ * Move rsc_rhel8-2 ( rhel8-4 -> rhel8-2 )
+ * Restart migrator ( rhel8-5 )
+ * Start ping-1:3 ( rhel8-2 )
+
+Executing Cluster Transition:
+ * Resource action: Fencing monitor on rhel8-2
+ * Resource action: FencingPass monitor on rhel8-2
+ * Resource action: FencingFail monitor on rhel8-2
+ * Resource action: rsc_rhel8-1 stop on rhel8-3
+ * Resource action: rsc_rhel8-1 monitor on rhel8-2
+ * Resource action: rsc_rhel8-2 stop on rhel8-4
+ * Resource action: rsc_rhel8-2 monitor on rhel8-2
+ * Resource action: rsc_rhel8-3 monitor on rhel8-2
+ * Resource action: rsc_rhel8-4 monitor on rhel8-2
+ * Resource action: rsc_rhel8-5 monitor on rhel8-2
+ * Resource action: migrator stop on rhel8-2
+ * Resource action: migrator stop on rhel8-5
+ * Resource action: migrator monitor on rhel8-2
+ * Resource action: ping-1 monitor on rhel8-2
+ * Pseudo action: Connectivity_start_0