From 355461723733acc0f6f9d9cc1318c91ba2a0ae6c Mon Sep 17 00:00:00 2001
From: Ken Gaillot <kgaillot@redhat.com>
Date: Tue, 22 May 2018 15:55:14 -0500
Subject: [PATCH] Fix: all: prefer appropriate node when multiply active
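
When a resource is active on more than one node, most callers simply
took the first entry of the resource's running_on list, which is an
arbitrary choice. Replace those open-coded lookups with the
pe__current_node(), pe__find_active_on(), and pe__find_active_requires()
helpers (defined elsewhere in this series), so an appropriate node is
chosen consistently. As a rough sketch of the idiom being replaced,
callers that previously did:

    node_t *node = NULL;

    /* take whatever happens to be first in the list */
    if (rsc->running_on) {
        node = rsc->running_on->data;
    }

now do:

    node_t *node = pe__current_node(rsc);

While at it:

* get_router_node() now routes actions for a partially migrated
  (non-guest) remote connection resource through the migration target,
  since all such actions are ordered after the connection's pseudo-start
  there.

* crm_resource --move now validates the requested destination node
  before checking where the resource is currently active.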

---
 lib/pengine/container.c      |  8 +++----
 lib/pengine/native.c         |  8 +++----
 pengine/allocate.c           | 20 ++++++++---------
 pengine/clone.c              | 51 +++++++++++++++++++-----------------------
 pengine/graph.c              | 26 +++++++++++++---------
 pengine/native.c             | 17 +++++++-------
 pengine/notif.c              |  2 +-
 tools/crm_mon.c              | 14 ++++--------
 tools/crm_resource.c         | 21 +++++++++++-------
 tools/crm_resource_print.c   | 16 ++++++-------
 tools/crm_resource_runtime.c | 53 ++++++++++++++++++++++----------------------
 11 files changed, 112 insertions(+), 124 deletions(-)

diff --git a/lib/pengine/container.c b/lib/pengine/container.c
index b5340bf..d82948a 100644
--- a/lib/pengine/container.c
+++ b/lib/pengine/container.c
@@ -807,11 +807,11 @@ container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
     }
 
     node = tuple->docker->allocated_to;
-    if(node == NULL && tuple->docker->running_on) {
+    if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
-        node = tuple->docker->running_on->data;
+        node = pe__current_node(tuple->docker);
     }
 
     if(node == NULL) {
@@ -1289,9 +1289,7 @@ tuple_print(container_grouping_t * tuple, const char *pre_text, long options, vo
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
     }
 
-    if (tuple->docker->running_on) {
-        node = tuple->docker->running_on->data;
-    }
+    node = pe__current_node(tuple->docker);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index e01ef17..eda0355 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -457,7 +457,7 @@ native_print_xml(resource_t * rsc, const char *pre_text, long options, void *pri
     if (options & pe_print_rsconly) {
         status_print("/>\n");
         /* do nothing */
-    } else if (g_list_length(rsc->running_on) > 0) {
+    } else if (rsc->running_on != NULL) {
         GListPtr gIter = rsc->running_on;
 
         status_print(">\n");
@@ -529,7 +529,7 @@ common_print(resource_t * rsc, const char *pre_text, const char *name, node_t *n
         } else if (is_set(rsc->flags, pe_rsc_failed)) {
             status_print("<font color=\"red\">");
 
-        } else if (rsc->variant == pe_native && g_list_length(rsc->running_on) == 0) {
+        } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
             status_print("<font color=\"red\">");
 
         } else if (g_list_length(rsc->running_on) > 1) {
@@ -742,9 +742,7 @@ native_print(resource_t * rsc, const char *pre_text, long options, void *print_d
         return;
     }
 
-    if (rsc->running_on != NULL) {
-        node = rsc->running_on->data;
-    }
+    node = pe__current_node(rsc);
     common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
 }
 
diff --git a/pengine/allocate.c b/pengine/allocate.c
index 724736c..427575b 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -1101,14 +1101,14 @@ sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
     r2_weight = -INFINITY;
 
     if (resource1->running_on) {
-        r1_node = g_list_nth_data(resource1->running_on, 0);
+        r1_node = pe__current_node(resource1);
         r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
         if (r1_node != NULL) {
             r1_weight = r1_node->weight;
         }
     }
     if (resource2->running_on) {
-        r2_node = g_list_nth_data(resource2->running_on, 0);
+        r2_node = pe__current_node(resource2);
         r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
         if (r2_node != NULL) {
             r2_weight = r2_node->weight;
@@ -1925,10 +1925,7 @@ get_remote_node_state(pe_node_t *node)
     remote_rsc = node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
-    if(remote_rsc->running_on) {
-        cluster_node = remote_rsc->running_on->data;
-    }
-
+    cluster_node = pe__current_node(remote_rsc);
 
     /* If the cluster node the remote connection resource resides on
      * is unclean or went offline, we can't process any operations
@@ -1989,11 +1986,14 @@ get_remote_node_state(pe_node_t *node)
     return remote_state_alive;
 }
 
+/*!
+ * \internal
+ * \brief Order actions on remote node relative to actions for the connection
+ */
 static void
 apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
 {
     resource_t *remote_rsc = NULL;
-    node_t *cluster_node = NULL;
     enum action_tasks task = text2task(action->task);
     enum remote_connection_state state = get_remote_node_state(action->node);
 
@@ -2009,10 +2009,6 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
-    if(remote_rsc->running_on) {
-        cluster_node = remote_rsc->running_on->data;
-    }
-
     crm_trace("Order %s action %s relative to %s%s (state: %s)",
               action->task, action->uuid,
               is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
@@ -2093,6 +2089,8 @@ apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
                                         pe_order_implies_then, data_set);
 
             } else {
+                node_t *cluster_node = pe__current_node(remote_rsc);
+
                 if(task == monitor_rsc && state == remote_state_failed) {
                     /* We would only be here if we do not know the
                      * state of the resource on the remote node.
diff --git a/pengine/clone.c b/pengine/clone.c
index 3192412..1de2661 100644
--- a/pengine/clone.c
+++ b/pengine/clone.c
@@ -69,6 +69,10 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set)
     int rc = 0;
     node_t *node1 = NULL;
     node_t *node2 = NULL;
+    node_t *current_node1 = NULL;
+    node_t *current_node2 = NULL;
+    unsigned int nnodes1 = 0;
+    unsigned int nnodes2 = 0;
 
     gboolean can1 = TRUE;
     gboolean can2 = TRUE;
@@ -87,24 +91,22 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set)
      *  - inactive instances
      */
 
-    if (resource1->running_on && resource2->running_on) {
-        if (g_list_length(resource1->running_on) < g_list_length(resource2->running_on)) {
+    current_node1 = pe__find_active_on(resource1, &nnodes1, NULL);
+    current_node2 = pe__find_active_on(resource2, &nnodes2, NULL);
+
+    if (nnodes1 && nnodes2) {
+        if (nnodes1 < nnodes2) {
             crm_trace("%s < %s: running_on", resource1->id, resource2->id);
             return -1;
 
-        } else if (g_list_length(resource1->running_on) > g_list_length(resource2->running_on)) {
+        } else if (nnodes1 > nnodes2) {
             crm_trace("%s > %s: running_on", resource1->id, resource2->id);
             return 1;
         }
     }
 
-    if (resource1->running_on) {
-        node1 = resource1->running_on->data;
-    }
-    if (resource2->running_on) {
-        node2 = resource2->running_on->data;
-    }
-
+    node1 = current_node1;
+    node2 = current_node2;
     if (node1) {
         node_t *match = pe_hash_table_lookup(resource1->allowed_nodes, node1->details->id);
 
@@ -216,10 +218,10 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set)
         GHashTable *hash2 =
             g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
 
-        n = node_copy(resource1->running_on->data);
+        n = node_copy(current_node1);
         g_hash_table_insert(hash1, (gpointer) n->details->id, n);
 
-        n = node_copy(resource2->running_on->data);
+        n = node_copy(current_node2);
         g_hash_table_insert(hash2, (gpointer) n->details->id, n);
 
         if(resource1->parent) {
@@ -267,11 +269,8 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set)
         }
 
         /* Current location score */
-        node1 = g_list_nth_data(resource1->running_on, 0);
-        node1 = g_hash_table_lookup(hash1, node1->details->id);
-
-        node2 = g_list_nth_data(resource2->running_on, 0);
-        node2 = g_hash_table_lookup(hash2, node2->details->id);
+        node1 = g_hash_table_lookup(hash1, current_node1->details->id);
+        node2 = g_hash_table_lookup(hash2, current_node2->details->id);
 
         if (node1->weight < node2->weight) {
             if (node1->weight < 0) {
@@ -295,12 +294,8 @@ sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set)
         list1 = g_hash_table_get_values(hash1);
         list2 = g_hash_table_get_values(hash2);
 
-        list1 =
-            g_list_sort_with_data(list1, sort_node_weight,
-                                  g_list_nth_data(resource1->running_on, 0));
-        list2 =
-            g_list_sort_with_data(list2, sort_node_weight,
-                                  g_list_nth_data(resource2->running_on, 0));
+        list1 = g_list_sort_with_data(list1, sort_node_weight, current_node1);
+        list2 = g_list_sort_with_data(list2, sort_node_weight, current_node2);
         max = g_list_length(list1);
         if (max < g_list_length(list2)) {
             max = g_list_length(list2);
@@ -528,8 +523,8 @@ distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes,
 
         if (child->running_on && is_set(child->flags, pe_rsc_provisional)
             && is_not_set(child->flags, pe_rsc_failed)) {
-            node_t *child_node = child->running_on->data;
-            node_t *local_node = parent_node_instance(child, child->running_on->data);
+            node_t *child_node = pe__current_node(child);
+            node_t *local_node = parent_node_instance(child, child_node);
 
             pe_rsc_trace(rsc, "Checking pre-allocation of %s to %s (%d remaining of %d)",
                          child->id, child_node->details->uname, max - allocated, max);
@@ -556,9 +551,9 @@ distribute_children(resource_t *rsc, GListPtr children, GListPtr nodes,
     for (GListPtr gIter = children; gIter != NULL; gIter = gIter->next) {
         resource_t *child = (resource_t *) gIter->data;
 
-        if (g_list_length(child->running_on) > 0) {
-            node_t *child_node = child->running_on->data;
-            node_t *local_node = parent_node_instance(child, child->running_on->data);
+        if (child->running_on != NULL) {
+            node_t *child_node = pe__current_node(child);
+            node_t *local_node = parent_node_instance(child, child_node);
 
             if (local_node == NULL) {
                 crm_err("%s is running on %s which isn't allowed",
diff --git a/pengine/graph.c b/pengine/graph.c
index 6d4e4c7..236b278 100644
--- a/pengine/graph.c
+++ b/pengine/graph.c
@@ -783,6 +783,7 @@ get_router_node(action_t *action)
     node_t *began_on = NULL;
     node_t *ended_on = NULL;
     node_t *router_node = NULL;
+    bool partial_migration = FALSE;
 
     if (safe_str_eq(action->task, CRM_OP_FENCE) || is_remote_node(action->node) == FALSE) {
         return NULL;
@@ -790,10 +791,13 @@ get_router_node(action_t *action)
 
     CRM_ASSERT(action->node->details->remote_rsc != NULL);
 
-    if (action->node->details->remote_rsc->running_on) {
-        began_on = action->node->details->remote_rsc->running_on->data;
-    }
+    began_on = pe__current_node(action->node->details->remote_rsc);
     ended_on = action->node->details->remote_rsc->allocated_to;
+    if (action->node->details->remote_rsc
+        && (action->node->details->remote_rsc->container == NULL)
+        && action->node->details->remote_rsc->partial_migration_target) {
+        partial_migration = TRUE;
+    }
 
     /* if there is only one location to choose from,
      * this is easy. Check for those conditions first */
@@ -817,6 +821,10 @@ get_router_node(action_t *action)
      *    are all required before the remote rsc stop action can occur.) In
      *    this case, we know these actions have to be routed through the initial
      *    cluster node the connection resource lived on before the move takes place.
+     *    The exception is a partial migration of a (non-guest) remote
+     *    connection resource; in that case, all actions (even these) will be
+     *    ordered after the connection's pseudo-start on the migration target,
+     *    so the target is the router node.
      *
      * 2. Everything else (start, promote, monitor, probe, refresh, clear failcount
      *    delete ....) must occur after the resource starts on the node it is
@@ -824,10 +832,10 @@ get_router_node(action_t *action)
      */
 
     /* 1. before connection rsc moves. */
-    if (safe_str_eq(action->task, "stop") ||
+    if ((safe_str_eq(action->task, "stop") ||
         safe_str_eq(action->task, "demote") ||
         safe_str_eq(action->task, "migrate_from") ||
-        safe_str_eq(action->task, "migrate_to")) {
+        safe_str_eq(action->task, "migrate_to")) && !partial_migration) {
 
         router_node = began_on;
 
@@ -1234,18 +1242,14 @@ action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set)
                 case stopped_rsc:
                 case action_demote:
                 case action_demoted:
-                    if(action->node->details->remote_rsc->container->running_on) {
-                        host = action->node->details->remote_rsc->container->running_on->data;
-                    }
+                    host = pe__current_node(action->node->details->remote_rsc->container);
                     break;
                 case start_rsc:
                 case started_rsc:
                 case monitor_rsc:
                 case action_promote:
                 case action_promoted:
-                    if(action->node->details->remote_rsc->container->allocated_to) {
-                        host = action->node->details->remote_rsc->container->allocated_to;
-                    }
+                    host = action->node->details->remote_rsc->container->allocated_to;
                     break;
                 default:
                     break;
diff --git a/pengine/native.c b/pengine/native.c
index 37ac2e4..1c26642 100644
--- a/pengine/native.c
+++ b/pengine/native.c
@@ -102,7 +102,7 @@ native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_se
     if (length > 0) {
         nodes = g_hash_table_get_values(rsc->allowed_nodes);
         nodes = g_list_sort_with_data(nodes, sort_node_weight,
-                                      g_list_nth_data(rsc->running_on, 0));
+                                      pe__current_node(rsc));
 
         // First node in sorted list has the best score
         best = g_list_nth_data(nodes, 0);
@@ -158,7 +158,7 @@ native_choose_node(resource_t * rsc, node_t * prefer, pe_working_set_t * data_se
              * remaining unallocated instances to prefer a node that's already
              * running another instance.
              */
-            node_t *running = g_list_nth_data(rsc->running_on, 0);
+            node_t *running = pe__current_node(rsc);
 
             if (running && (can_run_resources(running) == FALSE)) {
                 pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
@@ -534,16 +534,14 @@ native_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
         node_t *assign_to = NULL;
 
         rsc->next_role = rsc->role;
-        if (rsc->running_on == NULL) {
+        assign_to = pe__current_node(rsc);
+        if (assign_to == NULL) {
             reason = "inactive";
         } else if (rsc->role == RSC_ROLE_MASTER) {
-            assign_to = rsc->running_on->data;
             reason = "master";
         } else if (is_set(rsc->flags, pe_rsc_failed)) {
-            assign_to = rsc->running_on->data;
             reason = "failed";
         } else {
-            assign_to = rsc->running_on->data;
             reason = "active";
         }
         pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
@@ -1834,7 +1832,9 @@ rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working
                  rsc_lh->id, rsc_ticket->ticket->id, rsc_ticket->id,
                  role2text(rsc_ticket->role_lh));
 
-    if (rsc_ticket->ticket->granted == FALSE && g_list_length(rsc_lh->running_on) > 0) {
+    if ((rsc_ticket->ticket->granted == FALSE)
+        && (rsc_lh->running_on != NULL)) {
+
         GListPtr gIter = NULL;
 
         switch (rsc_ticket->loss_policy) {
@@ -1867,7 +1867,7 @@ rsc_ticket_constraint(resource_t * rsc_lh, rsc_ticket_t * rsc_ticket, pe_working
                 if (filter_rsc_ticket(rsc_lh, rsc_ticket) == FALSE) {
                     return;
                 }
-                if (g_list_length(rsc_lh->running_on) > 0) {
+                if (rsc_lh->running_on != NULL) {
                     clear_bit(rsc_lh->flags, pe_rsc_managed);
                     set_bit(rsc_lh->flags, pe_rsc_block);
                 }
@@ -1919,7 +1919,6 @@ native_update_actions(action_t * first, action_t * then, node_t * node, enum pe_
         } else if ((then_rsc_role >= RSC_ROLE_STARTED)
                    && safe_str_eq(then->task, RSC_START)
                    && then->node
-                   && then_rsc->running_on
                    && g_list_length(then_rsc->running_on) == 1
                    && then->node->details == ((node_t *) then_rsc->running_on->data)->details) {
             /* ignore... if 'then' is supposed to be started after 'first', but
diff --git a/pengine/notif.c b/pengine/notif.c
index 3013ee0..4913249 100644
--- a/pengine/notif.c
+++ b/pengine/notif.c
@@ -113,7 +113,7 @@ expand_node_list(GListPtr list, char **uname, char **metal)
             if(node->details->remote_rsc
                && node->details->remote_rsc->container
                && node->details->remote_rsc->container->running_on) {
-                node = node->details->remote_rsc->container->running_on->data;
+                node = pe__current_node(node->details->remote_rsc->container);
             }
 
             if (node->details->uname == NULL) {
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index 824b12f..7c63803 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1953,16 +1953,10 @@ get_node_display_name(node_t *node)
 
     /* Host is displayed only if this is a guest node */
     if (is_container_remote_node(node)) {
-        if (node->details->remote_rsc->running_on) {
-            /* running_on is a list, but guest nodes will have exactly one entry
-             * unless they are in the process of migrating, in which case they
-             * will have two; either way, we can use the first item in the list
-             */
-            node_t *host_node = (node_t *) node->details->remote_rsc->running_on->data;
-
-            if (host_node && host_node->details) {
-                node_host = host_node->details->uname;
-            }
+        node_t *host_node = pe__current_node(node->details->remote_rsc);
+
+        if (host_node && host_node->details) {
+            node_host = host_node->details->uname;
         }
         if (node_host == NULL) {
             node_host = ""; /* so we at least get "uname@" to indicate guest */
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index c64432e..0557892 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1015,23 +1015,27 @@ main(int argc, char **argv)
         rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn);
 
     } else if (rsc_cmd == 'B' || rsc_cmd == 'M') {
+        pe_node_t *current = NULL;
+        unsigned int nactive = 0;
+
         rc = -EINVAL;
-        if (g_list_length(rsc->running_on) == 1) {
-            node_t *current = rsc->running_on->data;
+        current = pe__find_active_requires(rsc, &nactive);
+
+        if (nactive == 1) {
             rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
 
         } else if(rsc->variant == pe_master) {
             int count = 0;
             GListPtr iter = NULL;
-            node_t *current = NULL;
 
+            current = NULL;
             for(iter = rsc->children; iter; iter = iter->next) {
                 resource_t *child = (resource_t *)iter->data;
                 enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
                 if(child_role == RSC_ROLE_MASTER) {
                     count++;
-                    current = child->running_on->data;
+                    current = pe__current_node(child);
                 }
             }
 
@@ -1039,14 +1043,15 @@ main(int argc, char **argv)
                 rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
 
             } else {
-                CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).", rsc_id, g_list_length(rsc->running_on), count);
+                CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).",
+                        rsc_id, nactive, count);
                 CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node <name>", rsc_id);
                 CMD_ERR("You can prevent '%s' from being promoted at a specific location with:"
                         " --ban --master --node <name>", rsc_id);
             }
 
         } else {
-            CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, g_list_length(rsc->running_on));
+            CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, nactive);
             CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node <name>", rsc_id);
         }
 
@@ -1164,12 +1169,12 @@ main(int argc, char **argv)
             node_t *node = pe_find_node(data_set.nodes, host_uname);
 
             if (node && is_remote_node(node)) {
-                if (node->details->remote_rsc == NULL || node->details->remote_rsc->running_on == NULL) {
+                node = pe__current_node(node->details->remote_rsc);
+                if (node == NULL) {
                     CMD_ERR("No lrmd connection detected to remote node %s", host_uname);
                     rc = -ENXIO;
                     goto bail;
                 }
-                node = node->details->remote_rsc->running_on->data;
                 router_node = node->details->uname;
                 attr_options |= attrd_opt_remote;
             }
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index d066c42..2463fb5 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -68,6 +68,7 @@ cli_resource_print_cts(resource_t * rsc)
     const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+    pe_node_t *node = pe__current_node(rsc);
 
     if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
         xmlNode *op = NULL;
@@ -90,10 +91,8 @@ cli_resource_print_cts(resource_t * rsc)
         }
     }
 
-    if (rsc->running_on != NULL && g_list_length(rsc->running_on) == 1) {
-        node_t *tmp = rsc->running_on->data;
-
-        host = tmp->details->uname;
+    if (node != NULL) {
+        host = node->details->uname;
     }
 
     printf("Resource: %s %s %s %s %s %s %s %s %d %lld 0x%.16llx\n",
@@ -315,16 +314,15 @@ int
 cli_resource_print_attribute(resource_t *rsc, const char *attr, pe_working_set_t * data_set)
 {
     int rc = -ENXIO;
-    node_t *current = NULL;
+    unsigned int count = 0;
     GHashTable *params = NULL;
     const char *value = NULL;
+    node_t *current = pe__find_active_on(rsc, &count, NULL);
 
-    if (g_list_length(rsc->running_on) == 1) {
-        current = rsc->running_on->data;
-
-    } else if (g_list_length(rsc->running_on) > 1) {
+    if (count > 1) {
         CMD_ERR("%s is active on more than one node,"
                 " returning the default value for %s", rsc->id, crm_str(attr));
+        current = NULL;
     }
 
     params = crm_str_table_new();
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index 5e54f9e..5004935 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -473,11 +473,11 @@ send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op,
         node_t *node = pe_find_node(data_set->nodes, host_uname);
 
         if (node && is_remote_node(node)) {
-            if (node->details->remote_rsc == NULL || node->details->remote_rsc->running_on == NULL) {
+            node = pe__current_node(node->details->remote_rsc);
+            if (node == NULL) {
                 CMD_ERR("No lrmd connection detected to remote node %s", host_uname);
                 return -ENXIO;
             }
-            node = node->details->remote_rsc->running_on->data;
             router_node = node->details->uname;
         }
     }
@@ -1648,11 +1648,16 @@ cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name,
                   cib_t *cib, pe_working_set_t *data_set)
 {
     int rc = -EINVAL;
-    int count = 0;
+    unsigned int count = 0;
     node_t *current = NULL;
     node_t *dest = pe_find_node(data_set->nodes, host_name);
     bool cur_is_dest = FALSE;
 
+    if (dest == NULL) {
+        CMD_ERR("Error performing operation: node '%s' is unknown", host_name);
+        return -ENXIO;
+    }
+
     if (scope_master && rsc->variant != pe_master) {
         resource_t *p = uber_parent(rsc);
         if(p->variant == pe_master) {
@@ -1667,8 +1672,12 @@ cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name,
         }
     }
 
+    current = pe__find_active_requires(rsc, &count);
+
     if(rsc->variant == pe_master) {
         GListPtr iter = NULL;
+        unsigned int master_count = 0;
+        pe_node_t *master_node = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             resource_t *child = (resource_t *)iter->data;
@@ -1676,37 +1685,27 @@ cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name,
 
             if(child_role == RSC_ROLE_MASTER) {
                 rsc = child;
-                count++;
+                master_node = pe__current_node(child);
+                master_count++;
             }
         }
-
-        if(scope_master == FALSE && count == 0) {
-            count = g_list_length(rsc->running_on);
+        if (scope_master || master_count) {
+            count = master_count;
+            current = master_node;
         }
 
-    } else if (pe_rsc_is_clone(rsc)) {
-        count = g_list_length(rsc->running_on);
-
-    } else if (g_list_length(rsc->running_on) > 1) {
-        CMD_ERR("Resource '%s' not moved: active on multiple nodes", rsc_id);
-        return rc;
-    }
-
-    if(dest == NULL) {
-        CMD_ERR("Error performing operation: node '%s' is unknown", host_name);
-        return -ENXIO;
     }
 
-    if(g_list_length(rsc->running_on) == 1) {
-        current = rsc->running_on->data;
+    if (count > 1) {
+        if (pe_rsc_is_clone(rsc)) {
+            current = NULL;
+        } else {
+            CMD_ERR("Resource '%s' not moved: active on multiple nodes", rsc_id);
+            return rc;
+        }
     }
 
-    if(current == NULL) {
-        /* Nothing to check */
-
-    } else if(scope_master && rsc->fns->state(rsc, TRUE) != RSC_ROLE_MASTER) {
-        crm_trace("%s is already active on %s but not in correct state", rsc_id, dest->details->uname);
-    } else if (safe_str_eq(current->details->uname, dest->details->uname)) {
+    if (current && (current->details == dest->details)) {
         cur_is_dest = TRUE;
         if (do_force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
@@ -1736,7 +1735,7 @@ cli_resource_move(resource_t *rsc, const char *rsc_id, const char *host_name,
             (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib);
 
         } else if(count > 1) {
-            CMD_ERR("Resource '%s' is currently %s in %d locations.  One may now move one to %s",
+            CMD_ERR("Resource '%s' is currently %s in %d locations. One may now move to %s",
                     rsc_id, scope_master?"promoted":"active", count, dest->details->uname);
             CMD_ERR("You can prevent '%s' from being %s at a specific location with:"
                     " --ban %s--host <name>", rsc_id, scope_master?"promoted":"active", scope_master?"--master ":"");
-- 
1.8.3.1