From 4e85d3012d61dcf534a51d4f82b91fce9aef8d0b Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 6 Dec 2019 11:57:59 -0600
Subject: [PATCH 02/10] Low: scheduler: respect shutdown locks when placing
 active resources

Use new pe_resource_t members to indicate that a resource is locked to a
particular node.

For active resources (i.e. in the transition where the node is scheduled for
shutdown), these members are set by checking each lockable resource for
whether it is running on a single clean node that is shutting down.

When applying constraints, place -INFINITY location constraints for locked
resources on all nodes other than the lock node.

(Inactive resources -- i.e. in later transitions after the node is shut down --
are not yet locked.)
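
As an illustrative sketch only (not part of this patch), a later consumer of
the new members could check whether a lock is still in force along these
lines, assuming data_set->shutdown_lock holds the configured lock limit in
seconds (0 meaning no limit); the helper name is hypothetical:

    static gboolean
    shutdown_lock_active(pe_resource_t *rsc, pe_working_set_t *data_set)
    {
        if (rsc->lock_node == NULL) {
            return FALSE;   /* resource is not shutdown-locked */
        }
        if (data_set->shutdown_lock > 0) {
            /* with a limit, the lock expires shutdown_lock seconds after
             * the recorded lock time
             */
            return get_effective_time(data_set)
                   < (rsc->lock_time + data_set->shutdown_lock);
        }
        return TRUE;        /* no limit: lock holds until cleared */
    }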
---
 include/crm/pengine/status.h |  2 ++
 pengine/allocate.c           | 86 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 88 insertions(+)

diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index c6d4bdb..1e8d5bb 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -347,6 +347,8 @@ struct resource_s {
     pe_working_set_t *cluster;
 
     pe_node_t *pending_node;    // Node on which pending_task is happening
+    pe_node_t *lock_node;       // Resource is shutdown-locked to this node
+    time_t lock_time;           // When shutdown lock started
 
 #if ENABLE_VERSIONED_ATTRS
     xmlNode *versioned_parameters;
diff --git a/pengine/allocate.c b/pengine/allocate.c
index 30d29e1..09f9e51 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -1009,6 +1009,86 @@ rsc_discover_filter(resource_t *rsc, node_t *node)
     }
 }
 
+static time_t
+shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
+{
+    const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
+    time_t result = 0;
+
+    if (shutdown) {
+        errno = 0;
+        result = (time_t) crm_int_helper(shutdown, NULL);
+        if (errno != 0) {
+            result = 0;
+        }
+    }
+    return result? result : get_effective_time(data_set);
+}
+
+static void
+apply_shutdown_lock(pe_resource_t *rsc, pe_working_set_t *data_set)
+{
+    const char *class;
+
+    // Only primitives and (uncloned) groups may be locked
+    if (rsc->variant == pe_group) {
+        for (GList *item = rsc->children; item != NULL;
+             item = item->next) {
+            apply_shutdown_lock((pe_resource_t *) item->data, data_set);
+        }
+    } else if (rsc->variant != pe_native) {
+        return;
+    }
+
+    // Fence devices and remote connections can't be locked
+    class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+    if ((class == NULL) || !strcmp(class, PCMK_RESOURCE_CLASS_STONITH)
+        || is_rsc_baremetal_remote_node(rsc, data_set)) {
+        return;
+    }
+
+    // Only a resource active on exactly one node can be locked
+    if (pcmk__list_of_1(rsc->running_on)) {
+        pe_node_t *node = rsc->running_on->data;
+
+        if (node->details->shutdown) {
+            if (node->details->unclean) {
+                pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
+                             rsc->id, node->details->uname);
+            } else {
+                rsc->lock_node = node;
+                rsc->lock_time = shutdown_time(node, data_set);
+            }
+        }
+    }
+
+    if (rsc->lock_node == NULL) {
+        // No lock needed
+        return;
+    }
+
+    if (data_set->shutdown_lock > 0) {
+        time_t lock_expiration = rsc->lock_time + data_set->shutdown_lock;
+
+        pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
+                    rsc->id, rsc->lock_node->details->uname,
+                    (long long) lock_expiration);
+    } else {
+        pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
+                    rsc->id, rsc->lock_node->details->uname);
+    }
+
+    // If resource is locked to one node, ban it from all other nodes
+    for (GList *item = data_set->nodes; item != NULL; item = item->next) {
+        pe_node_t *node = item->data;
+
+        if (strcmp(node->details->uname, rsc->lock_node->details->uname)) {
+            resource_location(rsc, node, -INFINITY,
+                              XML_CONFIG_ATTR_SHUTDOWN_LOCK, data_set);
+        }
+    }
+}
+
 /*
  * \internal
  * \brief Stage 2 of cluster status: apply node-specific criteria
@@ -1020,6 +1100,12 @@ stage2(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
+    if (is_set(data_set->flags, pe_flag_shutdown_lock)) {
+        for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
+            apply_shutdown_lock((pe_resource_t *) gIter->data, data_set);
+        }
+    }
+
     if (is_not_set(data_set->flags, pe_flag_no_compat)) {
         // @COMPAT API backward compatibility
         for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
-- 
1.8.3.1