From 2ba6773d0aea4ece57a69f5c0902ca865834aaf8 Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Fri, 5 Aug 2016 10:23:46 -0500
Subject: [PATCH] Low: pengine: use new "remote node" vs "guest node"
 terminology in allocate.c

---
 pengine/allocate.c | 69 ++++++++++++++++++++++++++++++------------------------
 1 file changed, 39 insertions(+), 30 deletions(-)

diff --git a/pengine/allocate.c b/pengine/allocate.c
index 999cc79..7464e9a 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -880,14 +880,14 @@ probe_resources(pe_working_set_t * data_set)
             continue;
 
         } else if (is_remote_node(node) && node->details->shutdown) {
-            /* Don't try and probe a remote node we're shutting down.
-             * It causes constraint conflicts to try and run any sort of action
-             * other that 'stop' on resources living within a remote-node when
-             * it is being shutdown. */
+            /* Don't probe a Pacemaker Remote node we're shutting down.
+             * It causes constraint conflicts to try to run any action
+             * other than "stop" on resources living within such a node when
+             * it is shutting down. */
             continue;
 
         } else if (is_container_remote_node(node)) {
-            /* TODO enable container node probes once ordered probing is implemented. */
+            /* TODO enable guest node probes once ordered probing is implemented */
             continue;
 
         } else if (node->details->rsc_discovery_enabled == FALSE) {
@@ -1158,9 +1158,10 @@ allocate_resources(pe_working_set_t * data_set)
                 continue;
             }
             pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
-            /* for remote node connection resources, always prefer the partial migration
-             * target during resource allocation if the rsc is in the middle of a
-             * migration */ 
+            /* For remote node connection resources, always prefer the partial
+             * migration target during resource allocation, if the rsc is in the
+             * middle of a migration.
+             */
             rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
         }
     }
@@ -1368,7 +1369,10 @@ stage6(pe_working_set_t * data_set)
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
-        /* remote-nodes associated with a container resource (such as a vm) are not fenced */
+        /* Guest nodes are "fenced" by recovering their container resource.
+         * The container stop may be explicit, or implied by the fencing of the
+         * guest's host.
+         */
         if (is_container_remote_node(node)) {
             /* Guest */
             if (need_stonith
@@ -1417,7 +1421,7 @@ stage6(pe_working_set_t * data_set)
             }
 
         } else if (node->details->online && node->details->shutdown &&
-                /* TODO define what a shutdown op means for a baremetal remote node.
+                /* TODO define what a shutdown op means for a remote node.
                  * For now we do not send shutdown operations for remote nodes, but
                  * if we can come up with a good use for this in the future, we will. */
                     is_remote_node(node) == FALSE) {
@@ -1694,10 +1698,11 @@ apply_remote_node_ordering(pe_working_set_t *data_set)
             action->rsc->is_remote_node &&
             safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
 
-            /* if we are clearing the failcount of an actual remote node connect
-             * resource, then make sure this happens before allowing the connection
-             * to start if we are planning on starting the connection during this
-             * transition */ 
+            /* If we are clearing the failcount of an actual remote node
+             * connection resource, then make sure this happens before allowing
+             * the connection to start if we are planning on starting the
+             * connection during this transition.
+             */
             custom_action_order(action->rsc,
                 NULL,
                 action,
@@ -1710,10 +1715,10 @@ apply_remote_node_ordering(pe_working_set_t *data_set)
                 continue;
         }
 
-        /* detect if the action occurs on a remote node. if so create
-         * ordering constraints that guarantee the action occurs while
-         * the remote node is active (after start, before stop...) things
-         * like that */ 
+        /* If the action occurs on a Pacemaker Remote node, create
+         * ordering constraints that guarantee the action occurs while the node
+         * is active (after start, before stop ... things like that).
+         */
         if (action->node == NULL ||
             is_remote_node(action->node) == FALSE ||
             action->node->details->remote_rsc == NULL ||
@@ -1747,12 +1752,13 @@ apply_remote_node_ordering(pe_working_set_t *data_set)
              * to build a constraint between a resource's demotion and
              * the connection resource starting... because the connection
              * resource can not start. The connection might already be up,
-             * but the START action would not be allowed which in turn would
-             * block the demotion of any resournces living in the remote-node.
+             * but the "start" action would not be allowed, which in turn would
+             * block the demotion of any resources living in the node.
              *
              * In this case, only build the constraint between the demotion and
-             * the connection's stop action. This allows the connection and all the
-             * resources within the remote-node to be torn down properly. */
+             * the connection's "stop" action. This allows the connection and
+             * all the resources within the node to be torn down properly.
+             */
             if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
                 custom_action_order(action->rsc,
                     NULL,
@@ -1780,10 +1786,11 @@ apply_remote_node_ordering(pe_working_set_t *data_set)
                    container &&
                    is_set(container->flags, pe_rsc_failed)) {
 
-            /* when the container representing a remote node fails, the stop
+            /* When the container representing a guest node fails, the stop
              * action for all the resources living in that container is implied
-             * by the container stopping.  This is similar to how fencing operations
-             * work for cluster nodes. */
+             * by the container stopping. This is similar to how fencing
+             * operations work for cluster nodes.
+             */
             pe_set_action_bit(action, pe_action_pseudo);
             custom_action_order(container,
                 generate_op_key(container->id, RSC_STOP, 0),
@@ -1796,14 +1803,16 @@ apply_remote_node_ordering(pe_working_set_t *data_set)
         } else if (safe_str_eq(action->task, "stop")) {
             gboolean after_start = FALSE;
 
-            /* handle special case with baremetal remote where stop actions need to be
-             * ordered after the connection resource starts somewhere else. */
+            /* Handle special case with remote node where stop actions need to be
+             * ordered after the connection resource starts somewhere else.
+             */
             if (is_baremetal_remote_node(action->node)) {
                 node_t *cluster_node = remote_rsc->running_on ? remote_rsc->running_on->data : NULL;
 
-                /* if the current cluster node a baremetal connection resource
-                 * is residing on is unclean or went offline we can't process any
-                 * operations on that remote node until after it starts somewhere else. */
+                /* If the cluster node the remote connection resource resides on
+                 * is unclean or went offline, we can't process any operations
+                 * on that remote node until after it starts elsewhere.
+                 */
                 if (cluster_node == NULL ||
                     cluster_node->details->unclean == TRUE ||
                     cluster_node->details->online == FALSE) {
-- 
1.8.3.1