From abbfc0ba9afb6c2d1ce54fea2d0cf25ce1d9108a Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 10 Jan 2020 18:18:07 -0600
Subject: [PATCH 10/10] Low: scheduler: clear resource history when appropriate

Tell the controller to clear resource history from the CIB when a resource has
a shutdown lock that expired or was cancelled because the resource is already
active elsewhere.
---
include/crm/pengine/internal.h | 6 +++++-
include/crm/pengine/status.h | 6 +++++-
lib/pengine/unpack.c | 1 +
lib/pengine/utils.c | 34 ++++++++++++++++++++++++++++++++--
pengine/allocate.c | 1 +
pengine/graph.c | 16 ++++++++++++++--
pengine/native.c | 6 ++++++
7 files changed, 64 insertions(+), 6 deletions(-)
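
For reference, both call sites added below (unpack_shutdown_lock() and
apply_shutdown_lock()) follow the same pattern: schedule the CIB-only
lrm_delete, then drop the lock fields. A minimal sketch of that pattern,
where the drop_shutdown_lock() wrapper is hypothetical and for illustration
only:

    #include <crm/pengine/status.h>
    #include <crm/pengine/internal.h>

    /* Illustration only, not part of the patch */
    static void
    drop_shutdown_lock(pe_resource_t *rsc, pe_node_t *node,
                       pe_working_set_t *data_set)
    {
        /* The scheduled lrm_delete is flagged pe_action_dc, so it remains
         * runnable even while the locked node is still offline or pending
         */
        pe__clear_resource_history(rsc, node, data_set);
        rsc->lock_node = NULL;
        rsc->lock_time = 0;
    }
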
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index fc908e8..64b9a50 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,5 +1,7 @@
/*
- * Copyright 2004-2019 Andrew Beekhof <andrew@beekhof.net>
+ * Copyright 2004-2020 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
*
* This source code is licensed under the GNU Lesser General Public License
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
@@ -366,4 +368,7 @@ void pe__free_param_checks(pe_working_set_t *data_set);
bool pe__shutdown_requested(pe_node_t *node);
bool pe__resource_is_disabled(pe_resource_t *rsc);
+pe_action_t *pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
+ pe_working_set_t *data_set);
+
#endif
diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index 1e8d5bb..9f9fd3b 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -1,5 +1,7 @@
/*
- * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
+ * Copyright 2004-2020 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -280,6 +282,8 @@ enum pe_action_flags {
pe_action_reschedule = 0x02000,
pe_action_tracking = 0x04000,
+
+ pe_action_dc = 0x10000, //! Action may run on DC instead of target
};
/* *INDENT-ON* */
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index bb5efa4..9deff67 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -2306,6 +2306,7 @@ unpack_shutdown_lock(xmlNode *rsc_entry, pe_resource_t *rsc, pe_node_t *node,
> (lock_time + data_set->shutdown_lock))) {
pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, node->details->uname);
+ pe__clear_resource_history(rsc, node, data_set);
} else {
rsc->lock_node = node;
rsc->lock_time = lock_time;
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index c336e37..84d3399 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -490,6 +490,11 @@ custom_action(resource_t * rsc, char *key, const char *task,
}
action->uuid = strdup(key);
+ if (safe_str_eq(task, CRM_OP_LRM_DELETE)) {
+ // Resource history deletion for a node can be done on the DC
+ pe_set_action_bit(action, pe_action_dc);
+ }
+
pe_set_action_bit(action, pe_action_runnable);
if (optional) {
pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
@@ -570,7 +575,8 @@ custom_action(resource_t * rsc, char *key, const char *task,
pe_set_action_bit(action, pe_action_optional);
/* action->runnable = FALSE; */
- } else if (action->node->details->online == FALSE
+ } else if (is_not_set(action->flags, pe_action_dc)
+ && !(action->node->details->online)
&& (!is_container_remote_node(action->node) || action->node->details->remote_requires_reset)) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
@@ -581,7 +587,8 @@ custom_action(resource_t * rsc, char *key, const char *task,
pe_fence_node(data_set, action->node, "resource actions are unrunnable", FALSE);
}
- } else if (action->node->details->pending) {
+ } else if (is_not_set(action->flags, pe_action_dc)
+ && action->node->details->pending) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
action->uuid, action->node->details->uname);
@@ -695,6 +702,8 @@ unpack_operation_on_fail(action_t * action)
value = on_fail;
}
+ } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) {
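+        // A failed clean-up is not a resource failure, so it should not trigger recovery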
+ value = "ignore";
}
return value;
@@ -2566,3 +2575,24 @@ pe__resource_is_disabled(pe_resource_t *rsc)
}
return false;
}
+
+/*!
+ * \internal
+ * \brief Create an action to clear a resource's history from CIB
+ *
+ * \param[in] rsc Resource to clear
+ * \param[in] node Node to clear history on
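+ * \param[in]  data_set  Cluster working set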
+ *
+ * \return New action to clear resource history
+ */
+pe_action_t *
+pe__clear_resource_history(pe_resource_t *rsc, pe_node_t *node,
+ pe_working_set_t *data_set)
+{
+ char *key = NULL;
+
+ CRM_ASSERT(rsc && node);
+ key = generate_op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
+ return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
+ data_set);
+}
diff --git a/pengine/allocate.c b/pengine/allocate.c
index 7366716..946f063 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -1058,6 +1058,7 @@ apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_rsc_info(rsc,
"Cancelling shutdown lock because %s is already active",
rsc->id);
+ pe__clear_resource_history(rsc, rsc->lock_node, data_set);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
diff --git a/pengine/graph.c b/pengine/graph.c
index 33168ca..a045549 100644
--- a/pengine/graph.c
+++ b/pengine/graph.c
@@ -596,10 +596,11 @@ update_action(action_t * then)
/* 'then' is required, so we must abandon 'first'
* (e.g. a required stop cancels any reload).
-             * Only used with reload actions as 'first'.
*/
set_bit(other->action->flags, pe_action_optional);
- clear_bit(first->rsc->flags, pe_rsc_reload);
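+            // 'first' is no longer necessarily a reload, so check before clearing the reload flag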
+ if (!strcmp(first->task, CRMD_ACTION_RELOAD)) {
+ clear_bit(first->rsc->flags, pe_rsc_reload);
+ }
}
if (first->rsc && then->rsc && (first->rsc != then->rsc)
@@ -1051,6 +1052,11 @@ action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set)
} else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) {
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
+ } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) {
+ // CIB-only clean-up for shutdown locks
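+        // mode="cib" tells the controller to update only the CIB, without involving the executor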
+ action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
+ crm_xml_add(action_xml, PCMK__XA_MODE, XML_TAG_CIB);
+
/* } else if(safe_str_eq(action->task, RSC_PROBED)) { */
/* action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */
@@ -1063,6 +1069,7 @@ action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set)
} else {
action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
+
#if ENABLE_VERSIONED_ATTRS
rsc_details = pe_rsc_action_details(action);
#endif
@@ -1404,6 +1411,11 @@ should_dump_action(action_t * action)
log_action(LOG_DEBUG, "Unallocated action", action, FALSE);
return FALSE;
+ } else if (is_set(action->flags, pe_action_dc)) {
+ crm_trace("Action %s (%d) should be dumped: "
+ "can run on DC instead of %s",
+ action->uuid, action->id, action->node->details->uname);
+
} else if(is_container_remote_node(action->node) && action->node->details->remote_requires_reset == FALSE) {
crm_trace("Assuming action %s for %s will be runnable", action->uuid, action->node->details->uname);
diff --git a/pengine/native.c b/pengine/native.c
index 1abaf29..b639fae 100644
--- a/pengine/native.c
+++ b/pengine/native.c
@@ -1439,6 +1439,12 @@ native_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
pe_order_runnable_left, data_set);
}
+ // Don't clear resource history if probing on same node
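+    // (pe_order_then_cancels_first: a scheduled probe on that node makes the lrm_delete optional)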
+ custom_action_order(rsc, generate_op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
+ NULL, rsc, generate_op_key(rsc->id, RSC_STATUS, 0),
+ NULL, pe_order_same_node|pe_order_then_cancels_first,
+ data_set);
+
// Certain checks need allowed nodes
if (check_unfencing || check_utilization || rsc->container) {
allowed_nodes = allowed_nodes_as_list(rsc, data_set);
--
1.8.3.1